git-subtree-dir: qcom/opensource/eva-kernel
git-subtree-mainline: caab746e9f7ab5e05abbc14067bbfb83b7837739
git-subtree-split: c1ff9cd986e7dd66ecf6b385b92ac3de85c76f4e
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/eva-kernel
tag: CV.LA.2.0.r1-04800-lanai.0
@@ -0,0 +1,5 @@
+cc_library_headers {
+ name: "qti_eva_kernel_headers",
+ export_include_dirs: ["include/uapi/eva/media"],
+ vendor_available: true
+}
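For context only: a vendor userspace module would typically consume these exported UAPI headers through Soong's header_libs property. A minimal sketch, assuming a hypothetical consumer module (the module name and source file below are illustrative, not part of this change):

    cc_library_shared {
        name: "libeva_client_example",              // hypothetical consumer module
        vendor: true,
        srcs: ["eva_client_example.c"],             // illustrative source file
        header_libs: ["qti_eva_kernel_headers"],    // picks up include/uapi/eva/media
    }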
@@ -0,0 +1,67 @@
+ENABLE_EVA_KERNEL := true
+ifeq ($(TARGET_USES_QMAA), true)
+ifneq ($(TARGET_USES_QMAA_OVERRIDE_CVP), true)
+ENABLE_EVA_KERNEL := false
+endif
+ifeq ($(call is-board-platform-in-list,volcano),true)
+ifeq ($(ENABLE_EVA_KERNEL), true)
+ifneq ($(TARGET_BOARD_PLATFORM), qssi)
+ifeq ($(call is-board-platform-in-list, $(TARGET_BOARD_PLATFORM)),true)
+
+DLKM_DIR := device/qcom/common/dlkm
+LOCAL_PATH := $(call my-dir)
+# For DDK
+LOCAL_MODULE_DDK_BUILD := true
+LOCAL_MODULE_KO_DIRS := msm/msm-eva.ko
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE := msm-eva.ko
+LOCAL_MODULE_KBUILD_NAME := msm/msm-eva.ko
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+LOCAL_ADDITIONAL_DEPENDENCY := synx-driver.ko
+# export to kbuild
+# Setup mmrm dependency
+LOCAL_REQUIRED_MODULES := mmrm-module-symvers
+LOCAL_ADDITIONAL_DEPENDENCIES := $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+KBUILD_REQUIRED_KOS += msm-mmrm.ko
+# Setup SynX dependency
+CONFIG_SYNX := y
+#ifdef CONFIG_SYNX
+ifeq ($(CONFIG_SYNX), y)
+$(warning Compiling SynX)
+LOCAL_REQUIRED_MODULES += synx-driver-symvers
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,synx-driver-symvers)/synx-driver-symvers
+KBUILD_REQUIRED_KOS += synx-driver.ko
+# Setup fastRPC dependency
+CONFIG_FASTRPC := y
+ifeq ($(CONFIG_FASTRPC), y)
+$(warning Compiling FastRPC)
+LOCAL_REQUIRED_MODULES += dsp-module-symvers
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,dsp-module-symvers)/Module.symvers
+KBUILD_REQUIRED_KOS += frpc-adsprpc.ko
+# print out variables
+$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS))
+$(info intermediates mmrm symvers path = $(call intermediates-dir-for,DLKM,mmrm-module-symvers))
+$(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY))
+$(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES))
+$(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES))
+$(info DLKM_DIR = $(DLKM_DIR))
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif # End of check for board platform
+endif # End of check for target product
+endif # End of enable eva kernel check
@@ -0,0 +1,38 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+package(
+ default_visibility = [
+ "//visibility:public"],
+)
+ddk_headers(
+ name = "eva_drivers_configs",
+ hdrs = [
+ "config/waipioevaconf.h"
+ ],
+ includes = ["config"]
+)
+ddk_headers(
+ name = "uapi_headers",
+ hdrs = glob([
+ "include/uapi/eva/media/*.h",
+ ]),
+ includes = ["include/uapi/eva"]
+)
+ddk_headers(
+ name = "msm_headers",
+ hdrs = glob([
+ "msm/eva/*.h",
+ "msm/eva/vm/*.h",
+ ]),
+ includes = ["msm","msm/eva"]
+)
+ddk_headers(
+ name = "eva_drivers_headers",
+ hdrs = [":eva_drivers_configs", ":uapi_headers", ":msm_headers"]
+)
+load(":pineapple.bzl", "define_pineapple")
+define_pineapple()
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CONFIG_BUILD_VENDORSI := true
+# auto-detect subdirs
+ifneq ($(CONFIG_BUILD_VENDORSI), true)
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+include $(srctree)/techpack/eva/config/waipioeva.conf
+LINUXINCLUDE += -include $(srctree)/techpack/eva/config/waipioevaconf.h
+LINUXINCLUDE += -I$(srctree)/techpack/eva/include \
+ -I$(srctree)/techpack/eva/include/uapi \
+ -I$(srctree)/techpack/eva/include/uapi/eva
+obj-y += msm/
@@ -0,0 +1,14 @@
+KBUILD_OPTIONS+= EVA_ROOT=$(KERNEL_SRC)/$(M)
+all:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+modules_install:
+ $(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
+%:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+clean:
+ rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+ rm -rf .tmp_versions
+ifeq ($(CONFIG_QGKI),y)
+export CONFIG_MSM_EVA=y
+else
+export CONFIG_MSM_EVA=m
+endif
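For orientation, this Makefile is driven by the Android DLKM build, which supplies KERNEL_SRC and M on the command line; an equivalent manual invocation might look like the following (paths are examples only, not taken from this change):

    # make KERNEL_SRC=/path/to/kernel_platform/common M=$(pwd) modules
    # make KERNEL_SRC=/path/to/kernel_platform/common M=$(pwd) modules_install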
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#define CONFIG_MSM_EVA 1
@@ -0,0 +1,18 @@
+# Build eva kernel driver
+ifeq ($(TARGET_BOARD_PLATFORM),volcano)
+ifneq ($(TARGET_BOARD_AUTO),true)
+ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm-eva.ko
@@ -0,0 +1,12 @@
+PRODUCT_PACKAGES += msm-eva.ko
@@ -0,0 +1,130 @@
+load(
+ "//build/kernel/kleaf:kernel.bzl",
+ "ddk_module",
+ "ddk_submodule",
+ "kernel_module",
+ "kernel_modules_install",
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps):
+ processed_config_srcs = {}
+ processed_config_deps = {}
+ for config_src_name in config_srcs:
+ config_src = config_srcs[config_src_name]
+ if type(config_src) == "list":
+ processed_config_srcs[config_src_name] = {True: config_src}
+ else:
+ processed_config_srcs[config_src_name] = config_src
+ for config_deps_name in config_deps:
+ config_dep = config_deps[config_deps_name]
+ if type(config_dep) == "list":
+ processed_config_deps[config_deps_name] = {True: config_dep}
+ else:
+ processed_config_deps[config_deps_name] = config_dep
+ module = struct(
+ name = name,
+ path = path,
+ srcs = srcs,
+ config_srcs = processed_config_srcs,
+ config_option = config_option,
+ deps = deps,
+ config_deps = processed_config_deps,
+ )
+ module_map[name] = module
+def _get_config_choices(map, options):
+ choices = []
+ for option in map:
+ choices.extend(map[option].get(option in options, []))
+ return choices
+def _get_kernel_build_options(modules, config_options):
+ all_options = {option: True for option in config_options}
+ all_options = all_options | {module.config_option: True for module in modules if module.config_option}
+ return all_options
+def _get_kernel_build_module_srcs(module, options, formatter):
+ srcs = module.srcs + _get_config_choices(module.config_srcs, options)
+ module_path = "{}/".format(module.path) if module.path else ""
+ globbed_srcs = native.glob(["{}{}".format(module_path, formatter(src)) for src in srcs])
+ return globbed_srcs
+def _get_kernel_build_module_deps(module, options, formatter):
+ deps = module.deps + _get_config_choices(module.config_deps, options)
+ deps = [formatter(dep) for dep in deps]
+ return deps
+def create_module_registry(hdrs = []):
+ module_map = {}
+ def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps = [], config_deps = {}):
+ _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps)
+ return struct(
+ register = register,
+ get = module_map.get,
+ hdrs = hdrs,
+ module_map = module_map,
+ )
+def define_target_variant_modules(target, variant, registry, modules, config_options = []):
+ kernel_build = "{}_{}".format(target, variant)
+ kernel_build_label = "//msm-kernel:{}".format(kernel_build)
+ modules = [registry.get(module_name) for module_name in modules]
+ options = _get_kernel_build_options(modules, config_options)
+ build_print = lambda message: print("{}: {}".format(kernel_build, message))
+ formatter = lambda s: s.replace("%b", kernel_build).replace("%t", target)
+ headers = ["//msm-kernel:all_headers"] + registry.hdrs
+ all_module_rules = []
+ for module in modules:
+ rule_name = "{}_{}".format(kernel_build, module.name)
+ module_srcs = _get_kernel_build_module_srcs(module, options, formatter)
+ if not module_srcs:
+ continue
+ ddk_submodule(
+ name = rule_name,
+ srcs = module_srcs,
+ out = "{}.ko".format(module.name),
+ copts = ["-Wno-format"],
+ deps = headers + _get_kernel_build_module_deps(module, options, formatter),
+ local_defines = options.keys(),
+ )
+ all_module_rules.append(rule_name)
+ ddk_module(
+ name = "{}_modules".format(kernel_build),
+ kernel_build = kernel_build_label,
+ deps = all_module_rules,
+ )
+ copy_to_dist_dir(
+ name = "{}_modules_dist".format(kernel_build),
+ data = [":{}_modules".format(kernel_build)],
+ dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(kernel_build),
+ flat = True,
+ wipe_dist_dir = False,
+ allow_duplicate_filenames = False,
+ mode_overrides = {"**/*": "644"},
+def define_consolidate_gki_modules(target, registry, modules, config_options = []):
+ define_target_variant_modules(target, "consolidate", registry, modules, config_options)
+ define_target_variant_modules(target, "gki", registry, modules, config_options)
@@ -0,0 +1,45 @@
+load(":eva_module_build.bzl", "create_module_registry")
+EVA_KERNEL_ROOT = "eva-kernel"
+eva_modules = create_module_registry([":eva_drivers_headers"])
+register_eva_module = eva_modules.register
+register_eva_module(
+ name = "msm-eva",
+ path = "msm",
+ srcs = [
+ "eva/cvp.c",
+ "eva/cvp_core_hfi.c",
+ "eva/cvp_dump.c",
+ "eva/cvp_fw_load.c",
+ "eva/cvp_hfi.c",
+ "eva/cvp_power.c",
+ "eva/cvp_smem.c",
+ "eva/hfi_packetization.c",
+ "eva/hfi_response_handler.c",
+ "eva/msm_cvp.c",
+ "eva/msm_cvp_buf.c",
+ "eva/msm_cvp_clocks.c",
+ "eva/msm_cvp_common.c",
+ "eva/msm_cvp_core.c",
+ "eva/msm_cvp_debug.c",
+ "eva/msm_cvp_dsp.c",
+ "eva/msm_cvp_ioctl.c",
+ "eva/msm_cvp_platform.c",
+ "eva/msm_cvp_res_parse.c",
+ "eva/msm_cvp_synx.c",
+ "eva/vm/cvp_vm_main.c",
+ "eva/vm/cvp_vm_msgq.c",
+ "eva/vm/cvp_vm_resource.c",
+ config_deps = {
+ "TARGET_SYNX_ENABLE": [
+ "//vendor/qcom/opensource/synx-kernel:synx_headers",
+ "//vendor/qcom/opensource/synx-kernel:%b_modules"
+ "TARGET_DSP_ENABLE": [
+ "//vendor/qcom/opensource/dsp-kernel:%b_frpc-adsprpc"
+ },
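The define_pineapple() entry point that BUILD.bazel loads from this file is not visible in the hunk above. Based on the helpers in eva_module_build.bzl, it would typically wrap define_consolidate_gki_modules(); a hedged sketch follows (it additionally assumes a load of define_consolidate_gki_modules from ":eva_module_build.bzl", and the config_options list mirrors the config_deps keys above rather than being confirmed by this diff):

    def define_pineapple():
        define_consolidate_gki_modules(
            target = "pineapple",
            registry = eva_modules,
            modules = ["msm-eva"],
            config_options = [
                "TARGET_SYNX_ENABLE",   # assumption: enables the synx deps registered above
                "TARGET_DSP_ENABLE",    # assumption: enables the fastRPC dep registered above
            ],
        )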
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+#ifndef __MSM_EVA_PRIVATE_H__
+#define __MSM_EVA_PRIVATE_H__
+#include <linux/types.h>
+/* Commands type */
+#define EVA_KMD_CMD_BASE 0x10000000
+#define EVA_KMD_CMD_START (EVA_KMD_CMD_BASE + 0x1000)
+/*
+ * Userspace clients pass one of the argument types below in
+ * struct eva_kmd_arg (@type field).
+ *
+ * EVA_KMD_GET_SESSION_INFO - this argument type is used to
+ * get session information from the driver. It passes
+ * struct eva_kmd_session_info {}
+ */
+#define EVA_KMD_GET_SESSION_INFO (EVA_KMD_CMD_START + 1)
+/*
+ * EVA_KMD_REGISTER_BUFFER - this argument type is used to
+ * register a buffer with the driver. It passes
+ * struct eva_kmd_buffer {}
+ */
+#define EVA_KMD_REGISTER_BUFFER (EVA_KMD_CMD_START + 3)
+/*
+ * EVA_KMD_UNREGISTER_BUFFER - this argument type is used to
+ * unregister a buffer from the driver. It passes
+ * struct eva_kmd_buffer {}
+ */
+#define EVA_KMD_UNREGISTER_BUFFER (EVA_KMD_CMD_START + 4)
+#define EVA_KMD_UPDATE_POWER (EVA_KMD_CMD_START + 17)
+#define EVA_KMD_SEND_CMD_PKT (EVA_KMD_CMD_START + 64)
+#define EVA_KMD_RECEIVE_MSG_PKT (EVA_KMD_CMD_START + 65)
+#define EVA_KMD_SET_SYS_PROPERTY (EVA_KMD_CMD_START + 66)
+#define EVA_KMD_GET_SYS_PROPERTY (EVA_KMD_CMD_START + 67)
+#define EVA_KMD_SESSION_CONTROL (EVA_KMD_CMD_START + 68)
+#define EVA_KMD_SEND_FENCE_CMD_PKT (EVA_KMD_CMD_START + 69)
+#define EVA_KMD_FLUSH_ALL (EVA_KMD_CMD_START + 70)
+#define EVA_KMD_FLUSH_FRAME (EVA_KMD_CMD_START + 71)
+/* flags */
+#define EVA_KMD_FLAG_UNSECURE 0x00000000
+#define EVA_KMD_FLAG_SECURE 0x00000001
+/* buffer type */
+#define EVA_KMD_BUFTYPE_INPUT 0x00000001
+#define EVA_KMD_BUFTYPE_OUTPUT 0x00000002
+#define EVA_KMD_BUFTYPE_INTERNAL_1 0x00000003
+#define EVA_KMD_BUFTYPE_INTERNAL_2 0x00000004
+/**
+ * struct eva_kmd_session_info - session information
+ * @session_id: current session id
+struct eva_kmd_session_info {
+ __u32 session_id;
+ __u32 reserved[10];
+};
+ * struct eva_kmd_buffer - buffer information to be registered
+ * @index: index of buffer
+ * @type: buffer type
+ * @fd: file descriptor of buffer
+ * @size: allocated size of buffer
+ * @offset: offset in fd from where usable data starts
+ * @pixelformat: fourcc format
+ * @flags: buffer flags
+struct eva_kmd_buffer {
+ __u32 index;
+ __u32 type;
+ __u32 fd;
+ __u32 size;
+ __u32 offset;
+ __u32 pixelformat;
+ __u32 flags;
+ __u32 reserved[5];
+ * struct eva_kmd_send_cmd - sending generic HFI command
+ * @cmd_address_fd: file descriptor of cmd_address
+ * @cmd_size: allocated size of buffer
+struct eva_kmd_send_cmd {
+ __u32 cmd_address_fd;
+ __u32 cmd_size;
+ * struct eva_kmd_client_data - store generic client
+ * data
+ * @transactionid: transaction id
+ * @client_data1: client data to be used during callback
+ * @client_data2: client data to be used during callback
+struct eva_kmd_client_data {
+ __u32 transactionid;
+ __u32 client_data1;
+ __u32 client_data2;
+ * Structures and macros for KMD arg data
+#define MAX_HFI_PKT_SIZE 490
+struct eva_kmd_hfi_packet {
+ __u32 pkt_data[MAX_HFI_PKT_SIZE];
+ void *oob_buf;
+#define EVA_KMD_PROP_HFI_VERSION 1
+#define EVA_KMD_PROP_SESSION_TYPE 2
+#define EVA_KMD_PROP_SESSION_KERNELMASK 3
+#define EVA_KMD_PROP_SESSION_PRIORITY 4
+#define EVA_KMD_PROP_SESSION_SECURITY 5
+#define EVA_KMD_PROP_SESSION_DSPMASK 6
+#define EVA_KMD_PROP_SESSION_DUMPOFFSET 7
+#define EVA_KMD_PROP_SESSION_DUMPSIZE 8
+#define EVA_KMD_PROP_SESSION_ERROR 9
+#define EVA_KMD_PROP_PWR_FDU 0x10
+#define EVA_KMD_PROP_PWR_ICA 0x11
+#define EVA_KMD_PROP_PWR_OD 0x12
+#define EVA_KMD_PROP_PWR_MPU 0x13
+#define EVA_KMD_PROP_PWR_FW 0x14
+#define EVA_KMD_PROP_PWR_DDR 0x15
+#define EVA_KMD_PROP_PWR_SYSCACHE 0x16
+#define EVA_KMD_PROP_PWR_FDU_OP 0x17
+#define EVA_KMD_PROP_PWR_ICA_OP 0x18
+#define EVA_KMD_PROP_PWR_OD_OP 0x19
+#define EVA_KMD_PROP_PWR_MPU_OP 0x1A
+#define EVA_KMD_PROP_PWR_FW_OP 0x1B
+#define EVA_KMD_PROP_PWR_DDR_OP 0x1C
+#define EVA_KMD_PROP_PWR_SYSCACHE_OP 0x1D
+#define EVA_KMD_PROP_PWR_FPS_FDU 0x1E
+#define EVA_KMD_PROP_PWR_FPS_MPU 0x1F
+#define EVA_KMD_PROP_PWR_FPS_OD 0x20
+#define EVA_KMD_PROP_PWR_FPS_ICA 0x21
+#define EVA_KMD_PROP_PWR_VADL 0x22
+#define EVA_KMD_PROP_PWR_VADL_OP 0x23
+#define EVA_KMD_PROP_PWR_FPS_VADL 0x24
+#define EVA_KMD_PROP_PWR_TOF 0x25
+#define EVA_KMD_PROP_PWR_TOF_OP 0x26
+#define EVA_KMD_PROP_PWR_FPS_TOF 0x27
+#define EVA_KMD_PROP_PWR_RGE 0x28
+#define EVA_KMD_PROP_PWR_RGE_OP 0x29
+#define EVA_KMD_PROP_PWR_FPS_RGE 0x2A
+#define EVA_KMD_PROP_PWR_XRA 0x2B
+#define EVA_KMD_PROP_PWR_XRA_OP 0x2C
+#define EVA_KMD_PROP_PWR_FPS_XRA 0x2D
+#define EVA_KMD_PROP_PWR_LSR 0x2E
+#define EVA_KMD_PROP_PWR_LSR_OP 0x2F
+#define EVA_KMD_PROP_PWR_FPS_LSR 0x30
+#define MAX_KMD_PROP_NUM_PER_PACKET 64
+#define MAX_KMD_PROP_TYPE (EVA_KMD_PROP_PWR_FPS_ICA + 1)
+struct eva_kmd_sys_property {
+ __u32 prop_type;
+ __u32 data;
+struct eva_kmd_sys_properties {
+ __u32 prop_num;
+ struct eva_kmd_sys_property prop_data[MAX_KMD_PROP_NUM_PER_PACKET];
+#define SESSION_CREATE 1
+#define SESSION_DELETE 2
+#define SESSION_START 3
+#define SESSION_STOP 4
+#define SESSION_INFO 5
+struct eva_kmd_session_control {
+ __u32 ctrl_type;
+ __u32 ctrl_data[8];
+#define MAX_HFI_FENCE_SIZE 64
+#define MAX_HFI_FENCE_OFFSET MAX_HFI_PKT_SIZE
+struct eva_kmd_hfi_fence_packet {
+ __u32 pkt_data[MAX_HFI_FENCE_OFFSET];
+ __u32 fence_data[MAX_HFI_FENCE_SIZE];
+ __u64 frame_id;
+struct eva_kmd_fence {
+ __u32 h_synx;
+struct eva_kmd_fence_ctrl {
+ __u32 magic;
+ __u32 reserved;
+ __u32 num_fences;
+ __u32 output_index;
+ struct eva_kmd_fence fences[MAX_HFI_FENCE_SIZE];
+#define MAX_FENCE_DATA_SIZE (MAX_HFI_FENCE_SIZE + 6)
+struct eva_kmd_hfi_synx_packet {
+ union {
+ __u32 fence_data[MAX_FENCE_DATA_SIZE];
+ struct eva_kmd_fence_ctrl fc;
+ };
+ struct eva_kmd_oob_buf* oob_buf;
+/**
+ * struct eva_kmd_arg
+ *
+ * @type: command type
+ * @buf_offset: offset to buffer list in the command
+ * @buf_num: number of buffers in the command
+ * @session: session information
+ * @req_power: power information
+ * @regbuf: buffer to be registered
+ * @unregbuf: buffer to be unregistered
+ * @send_cmd: sending generic HFI command
+ * @hfi_pkt: HFI packet created by user library
+ * @sys_properties: System properties read or set by user library
+ * @hfi_fence_pkt: HFI fence packet created by user library
+ */
+struct eva_kmd_arg {
+ __u32 type;
+ __u32 buf_offset;
+ __u32 buf_num;
+ union eva_data_t {
+ struct eva_kmd_session_info session;
+ struct eva_kmd_buffer regbuf;
+ struct eva_kmd_buffer unregbuf;
+ struct eva_kmd_send_cmd send_cmd;
+ struct eva_kmd_hfi_packet hfi_pkt;
+ struct eva_kmd_sys_properties sys_properties;
+ struct eva_kmd_hfi_fence_packet hfi_fence_pkt;
+ struct eva_kmd_hfi_synx_packet hfi_synx_pkt;
+ struct eva_kmd_session_control session_ctrl;
+ } data;
+};
+struct eva_kmd_request_power {
+ __u32 deprecated;
+#endif
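For orientation, userspace drives this interface by filling struct eva_kmd_arg and issuing an ioctl on the CVP character device. A minimal hedged sketch follows; the request macro (spelled EVA_KMD_IOCTL here) and the /dev/cvp node name are assumptions, since the actual ioctl definition is not among the lines shown above:

    /* illustrative only - not part of the patched header */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int eva_query_session_id(void)
    {
        struct eva_kmd_arg arg;
        int fd = open("/dev/cvp", O_RDWR);      /* assumed device node name */

        if (fd < 0)
            return -1;
        memset(&arg, 0, sizeof(arg));
        arg.type = EVA_KMD_GET_SESSION_INFO;    /* selects data.session in the union */
        if (ioctl(fd, EVA_KMD_IOCTL, &arg)) {   /* EVA_KMD_IOCTL is a hypothetical name */
            close(fd);
            return -1;
        }
        printf("EVA session id: %u\n", arg.data.session.session_id);
        close(fd);
        return 0;
    }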
@@ -0,0 +1,69 @@
+LINUXINCLUDE += -I$(EVA_ROOT)/include \
+ -I$(EVA_ROOT)/include/uapi \
+ -I$(EVA_ROOT)/include/uapi/eva
+#srctree is /kernel_platform/common/
+ccflags-y += -I$(EVA_ROOT)/msm/eva/ \
+ -I$(srctree)/drivers/media/platform/msm/synx/
+# add flag to compile the actual mmrm implementation instead of the stub version.
+# TODO: follow up with the mmrm team on whether techpack users need to define this long term.
+KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM
+# ported from Android.mk
+$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS))
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO))
+# include $(EVA_ROOT)/config/waipio.mk
+KBUILD_CPPFLAGS += -DCONFIG_EVA_WAIPIO=1
+ccflags-y += -DCONFIG_EVA_WAIPIO=1
+ifeq ($(CONFIG_ARCH_KALAMA), y)
+$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA))
+KBUILD_CPPFLAGS += -DCONFIG_EVA_KALAMA=1
+ccflags-y += -DCONFIG_EVA_KALAMA=1
+ifeq ($(CONFIG_ARCH_PINEAPPLE), y)
+$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE))
+KBUILD_CPPFLAGS += -DCONFIG_EVA_PINEAPPLE=1 -DCVP_CONFIG_SYNX_V2=1
+ccflags-y += -DCONFIG_EVA_PINEAPPLE=1 -DCVP_CONFIG_SYNX_V2=1
+ccflags-y += -I$(EVA_ROOT)/../synx-kernel/msm/synx/ \
+ -I$(EVA_ROOT)/../dsp-kernel/include/ \
+ -I$(EVA_ROOT)/../synx-kernel/include/uapi/synx/media/
+ifeq ($(CONFIG_EVA_LE), 1)
+ccflags-y += -DCONFIG_EVA_TVM=1
+msm-eva-objs := eva/cvp.o \
+ eva/msm_cvp_ioctl.o \
+ eva/msm_cvp_platform.o \
+ eva/msm_cvp_common.o \
+ eva/msm_cvp_core.o \
+ eva/msm_cvp.o \
+ eva/cvp_smem.o \
+ eva/msm_cvp_debug.o \
+ eva/msm_cvp_res_parse.o \
+ eva/cvp_dump.o \
+ eva/cvp_hfi.o \
+ eva/hfi_response_handler.o \
+ eva/hfi_packetization.o \
+ eva/cvp_core_hfi.o \
+ eva/msm_cvp_clocks.o\
+ eva/msm_cvp_dsp.o \
+ eva/msm_cvp_buf.o \
+ eva/msm_cvp_synx.o \
+ eva/cvp_fw_load.o \
+ eva/cvp_power.o \
+ eva/vm/cvp_vm_main.o \
+ eva/vm/cvp_vm_msgq.o \
+ eva/vm/cvp_vm_resource.o
+obj-m += msm-eva.o
@@ -0,0 +1,27 @@
+ccflags-y += -I$(srctree)/techpack/eva/msm/eva/ \
+ eva/msm_smem.o \
+ eva/cvp_power.o
+obj-$(CONFIG_MSM_EVA) := msm-eva.o
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include "msm_cvp_core.h"
+#include "msm_cvp_common.h"
+#include "msm_cvp_debug.h"
+#include "msm_cvp_internal.h"
+#include "msm_cvp_res_parse.h"
+#include "msm_cvp_resources.h"
+#include "msm_cvp_buf.h"
+#include "cvp_hfi_api.h"
+#include "cvp_private.h"
+#include "msm_cvp_clocks.h"
+#include "msm_cvp_dsp.h"
+#include "msm_cvp.h"
+#include "vm/cvp_vm.h"
+#define CLASS_NAME "cvp"
+#define DRIVER_NAME "cvp"
+struct msm_cvp_drv *cvp_driver;
+static int cvp_open(struct inode *inode, struct file *filp)
+{
+ struct msm_cvp_inst *inst;
+ dprintk(CVP_SESS, "%s\n", __func__);
+ inst = msm_cvp_open(MSM_CVP_USER, current);
+ if (!inst) {
+ dprintk(CVP_ERR, "Failed to create cvp instance\n");
+ return -ENOMEM;
+ }
+ filp->private_data = inst;
+ return 0;
+static int cvp_close(struct inode *inode, struct file *filp)
+ int rc = 0;
+ struct msm_cvp_inst *inst = filp->private_data;
+ rc = msm_cvp_close(inst);
+ filp->private_data = NULL;
+ return rc;
+static unsigned int cvp_poll(struct file *filp, struct poll_table_struct *p)
+ unsigned long flags = 0;
+ poll_wait(filp, &inst->event_handler.wq, p);
+ spin_lock_irqsave(&inst->event_handler.lock, flags);
+ if (inst->event_handler.event == CVP_SSR_EVENT)
+ rc |= POLLPRI;
+ if (inst->event_handler.event == CVP_DUMP_EVENT)
+ rc |= POLLIN;
+ inst->event_handler.event = CVP_NO_EVENT;
+ spin_unlock_irqrestore(&inst->event_handler.lock, flags);
+static const struct file_operations cvp_fops = {
+ .owner = THIS_MODULE,
+ .open = cvp_open,
+ .release = cvp_close,
+ .unlocked_ioctl = cvp_unblocked_ioctl,
+ .compat_ioctl = cvp_compat_ioctl,
+ .poll = cvp_poll,
+static int read_platform_resources(struct msm_cvp_core *core,
+ struct platform_device *pdev)
+ if (!core || !pdev) {
+ dprintk(CVP_ERR, "%s: Invalid params %pK %pK\n",
+ __func__, core, pdev);
+ return -EINVAL;
+ core->hfi_type = CVP_HFI_IRIS;
+ core->resources.pdev = pdev;
+ if (pdev->dev.of_node) {
+ /* Target supports DT, parse from it */
+ rc = cvp_read_platform_resources_from_drv_data(core);
+ rc = cvp_read_platform_resources_from_dt(&core->resources);
+ } else {
+ dprintk(CVP_ERR, "pdev node is NULL\n");
+ rc = -EINVAL;
+static int msm_cvp_initialize_core(struct platform_device *pdev,
+ struct msm_cvp_core *core)
+ int i = 0;
+ if (!core)
+ rc = read_platform_resources(core, pdev);
+ if (rc) {
+ dprintk(CVP_ERR, "Failed to get platform resources\n");
+ INIT_LIST_HEAD(&core->instances);
+ mutex_init(&core->lock);
+ mutex_init(&core->clk_lock);
+ core->state = CVP_CORE_UNINIT;
+ for (i = SYS_MSG_INDEX(SYS_MSG_START);
+ i <= SYS_MSG_INDEX(SYS_MSG_END); i++) {
+ init_completion(&core->completions[i]);
+ INIT_WORK(&core->ssr_work, msm_cvp_ssr_handler);
+ core->ssr_count = 0;
+static ssize_t link_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+ struct msm_cvp_core *core = dev_get_drvdata(dev);
+ if (core)
+ if (dev == core->dev)
+ return snprintf(buf, PAGE_SIZE, "msm_cvp\n");
+ else
+static DEVICE_ATTR_RO(link_name);
+static ssize_t pwr_collapse_delay_store(struct device *dev,
+ const char *buf, size_t count)
+ unsigned long val = 0;
+ struct msm_cvp_core *core = NULL;
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ else if (!val)
+ core = cvp_driver->cvp_core;
+ core->resources.msm_cvp_pwr_collapse_delay = val;
+ return count;
+static ssize_t pwr_collapse_delay_show(struct device *dev,
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ core->resources.msm_cvp_pwr_collapse_delay);
+static DEVICE_ATTR_RW(pwr_collapse_delay);
+static ssize_t thermal_level_show(struct device *dev,
+ return snprintf(buf, PAGE_SIZE, "%d\n", cvp_driver->thermal_level);
+static ssize_t thermal_level_store(struct device *dev,
+ int rc = 0, val = 0;
+ rc = kstrtoint(buf, 0, &val);
+ if (rc || val < 0) {
+ dprintk(CVP_WARN,
+ "Invalid thermal level value: %s\n", buf);
+ dprintk(CVP_PWR, "Thermal level old %d new %d\n",
+ cvp_driver->thermal_level, val);
+ if (val == cvp_driver->thermal_level)
+ cvp_driver->thermal_level = val;
+ msm_cvp_comm_handle_thermal_event();
+static DEVICE_ATTR_RW(thermal_level);
+static ssize_t sku_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ return scnprintf(buf, PAGE_SIZE, "%d",
+ cvp_driver->sku_version);
+static DEVICE_ATTR_RO(sku_version);
+static ssize_t boot_store(struct device *dev,
+ static int booted;
+ "Invalid boot value: %s\n", buf);
+ if (val == 1 && booted == 0) {
+ inst = msm_cvp_open(MSM_CVP_BOOT, current);
+ dprintk(CVP_ERR,
+ "Failed to create cvp instance\n");
+ "Failed to close cvp instance\n");
+ } else if (val == 2) {
+ "Failed to create eva instance\n");
+ rc = msm_cvp_session_create(inst);
+ dprintk(CVP_ERR, "Failed to create eva session\n");
+ "Failed to close eva instance\n");
+ booted = 1;
+static DEVICE_ATTR_WO(boot);
+static struct attribute *msm_cvp_core_attrs[] = {
+ &dev_attr_pwr_collapse_delay.attr,
+ &dev_attr_thermal_level.attr,
+ &dev_attr_sku_version.attr,
+ &dev_attr_link_name.attr,
+ &dev_attr_boot.attr,
+ NULL
+static struct attribute_group msm_cvp_core_attr_group = {
+ .attrs = msm_cvp_core_attrs,
+static const struct of_device_id msm_cvp_plat_match[] = {
+ {.compatible = "qcom,msm-cvp"},
+ {.compatible = "qcom,msm-cvp,context-bank"},
+ {.compatible = "qcom,msm-cvp,bus"},
+ {.compatible = "qcom,msm-cvp,mem-cdsp"},
+ {}
+static int msm_probe_cvp_device(struct platform_device *pdev)
+ struct msm_cvp_core *core;
+ if (!cvp_driver) {
+ dprintk(CVP_ERR, "Invalid cvp driver\n");
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ core->platform_data = cvp_get_drv_data(&pdev->dev);
+ dev_set_drvdata(&pdev->dev, core);
+ rc = msm_cvp_initialize_core(pdev, core);
+ dprintk(CVP_ERR, "Failed to init core\n");
+ goto err_core_init;
+ rc = alloc_chrdev_region(&core->dev_num, 0, 1, DRIVER_NAME);
+ if (rc < 0) {
+ dprintk(CVP_ERR, "alloc_chrdev_region failed: %d\n",
+ rc);
+ goto err_alloc_chrdev;
+ core->class = class_create(THIS_MODULE, CLASS_NAME);
+ if (IS_ERR(core->class)) {
+ rc = PTR_ERR(core->class);
+ dprintk(CVP_ERR, "class_create failed: %d\n",
+ goto err_class_create;
+ core->dev = device_create(core->class, NULL,
+ core->dev_num, NULL, DRIVER_NAME);
+ if (IS_ERR(core->dev)) {
+ rc = PTR_ERR(core->dev);
+ dprintk(CVP_ERR, "device_create failed: %d\n",
+ goto err_device_create;
+ dev_set_drvdata(core->dev, core);
+ cdev_init(&core->cdev, &cvp_fops);
+ rc = cdev_add(&core->cdev,
+ MKDEV(MAJOR(core->dev_num), 0), 1);
+ dprintk(CVP_ERR, "cdev_add failed: %d\n",
+ goto error_cdev_add;
+ rc = sysfs_create_group(&core->dev->kobj, &msm_cvp_core_attr_group);
+ "Failed to create attributes\n");
+ goto err_cores_exceeded;
+ /* VM manager shall be started before HFI init */
+ vm_manager.vm_ops->vm_start(core);
+ core->dev_ops = cvp_hfi_initialize(core->hfi_type,
+ &core->resources, &cvp_handle_cmd_response);
+ if (IS_ERR_OR_NULL(core->dev_ops)) {
+ mutex_lock(&cvp_driver->lock);
+ mutex_unlock(&cvp_driver->lock);
+ rc = PTR_ERR(core->dev_ops) ?: -EBADHANDLE;
+ if (rc != -EPROBE_DEFER)
+ dprintk(CVP_ERR, "Failed to create HFI device\n");
+ dprintk(CVP_CORE, "msm_cvp: request probe defer\n");
+ goto err_hfi_initialize;
+ cvp_synx_ftbl_init(core);
+ cvp_driver->cvp_core = core;
+ cvp_driver->debugfs_root = msm_cvp_debugfs_init_drv();
+ if (!cvp_driver->debugfs_root)
+ dprintk(CVP_ERR, "Failed to create debugfs for msm_cvp\n");
+ core->debugfs_root = msm_cvp_debugfs_init_core(
+ core, cvp_driver->debugfs_root);
+ cvp_driver->sku_version = core->resources.sku_version;
+ dprintk(CVP_CORE, "populating sub devices\n");
+ /*
+ * Trigger probe for each sub-device i.e. qcom,msm-cvp,context-bank.
+ * When msm_cvp_probe is called for each sub-device, parse the
+ * context-bank details and store it in core->resources.context_banks
+ * list.
+ rc = of_platform_populate(pdev->dev.of_node, msm_cvp_plat_match, NULL,
+ &pdev->dev);
+ dprintk(CVP_ERR, "Failed to trigger probe for sub-devices\n");
+ goto err_fail_sub_device_probe;
+ atomic64_set(&core->kernel_trans_id, ARRAY_SIZE(cvp_hfi_defs));
+ if (core->resources.dsp_enabled) {
+ rc = cvp_dsp_device_init();
+ dprintk(CVP_WARN, "Failed to initialize DSP driver\n");
+ dprintk(CVP_DSP, "DSP interface not enabled\n");
+err_fail_sub_device_probe:
+ cvp_hfi_deinitialize(core->hfi_type, core->dev_ops);
+ debugfs_remove_recursive(cvp_driver->debugfs_root);
+err_hfi_initialize:
+err_cores_exceeded:
+ cdev_del(&core->cdev);
+error_cdev_add:
+ device_destroy(core->class, core->dev_num);
+err_device_create:
+ class_destroy(core->class);
+err_class_create:
+ unregister_chrdev_region(core->dev_num, 1);
+err_alloc_chrdev:
+ sysfs_remove_group(&pdev->dev.kobj, &msm_cvp_core_attr_group);
+err_core_init:
+ dev_set_drvdata(&pdev->dev, NULL);
+ kfree(core);
+static int msm_cvp_probe_mem_cdsp(struct platform_device *pdev)
+ return cvp_read_mem_cdsp_resources_from_dt(pdev);
+static int msm_cvp_probe_context_bank(struct platform_device *pdev)
+ return cvp_read_context_bank_resources_from_dt(pdev);
+static int msm_cvp_probe_bus(struct platform_device *pdev)
+ return cvp_read_bus_resources_from_dt(pdev);
+static int msm_cvp_probe(struct platform_device *pdev)
+ * Sub devices probe will be triggered by of_platform_populate() towards
+ * the end of the probe function after msm-cvp device probe is
+ * completed. Return immediately after completing sub-device probe.
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cvp")) {
+ return msm_probe_cvp_device(pdev);
+ } else if (of_device_is_compatible(pdev->dev.of_node,
+ "qcom,msm-cvp,bus")) {
+ return msm_cvp_probe_bus(pdev);
+ "qcom,msm-cvp,context-bank")) {
+ return msm_cvp_probe_context_bank(pdev);
+ "qcom,msm-cvp,mem-cdsp")) {
+ return msm_cvp_probe_mem_cdsp(pdev);
+ /* How did we end up here? */
+ MSM_CVP_ERROR(1);
+static int msm_cvp_remove(struct platform_device *pdev)
+ if (!pdev) {
+ dprintk(CVP_ERR, "%s invalid input %pK", __func__, pdev);
+ core = dev_get_drvdata(&pdev->dev);
+ if (!core) {
+ dprintk(CVP_ERR, "%s invalid core", __func__);
+ msm_cvp_free_platform_resources(&core->resources);
+ mutex_destroy(&core->lock);
+ mutex_destroy(&core->clk_lock);
+static int msm_cvp_pm_suspend(struct device *dev)
+ * Bail out if
+ * - driver possibly not probed yet
+ * - not the main device. We don't support power management on
+ * subdevices (e.g. context banks)
+ if (!dev || !dev->driver ||
+ !of_device_is_compatible(dev->of_node, "qcom,msm-cvp"))
+ core = dev_get_drvdata(dev);
+ dprintk(CVP_ERR, "%s invalid core\n", __func__);
+ rc = msm_cvp_suspend();
+ if (rc == -ENOTSUPP)
+ rc = 0;
+ else if (rc)
+ dprintk(CVP_WARN, "Failed to suspend: %d\n", rc);
+static int msm_cvp_pm_resume(struct device *dev)
+ dprintk(CVP_INFO, "%s\n", __func__);
+static const struct dev_pm_ops msm_cvp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msm_cvp_pm_suspend, msm_cvp_pm_resume)
+MODULE_DEVICE_TABLE(of, msm_cvp_plat_match);
+static struct platform_driver msm_cvp_driver = {
+ .probe = msm_cvp_probe,
+ .remove = msm_cvp_remove,
+ .driver = {
+ .name = "msm_cvp",
+ .of_match_table = msm_cvp_plat_match,
+ .pm = &msm_cvp_pm_ops,
+static int __init msm_cvp_init(void)
+ cvp_driver = kzalloc(sizeof(*cvp_driver), GFP_KERNEL);
+ "Failed to allocate memroy for msm_cvp_drv\n");
+ mutex_init(&cvp_driver->lock);
+ rc = platform_driver_register(&msm_cvp_driver);
+ "Failed to register platform driver\n");
+ kfree(cvp_driver);
+ cvp_driver = NULL;
+ cvp_driver->msg_cache.cache = KMEM_CACHE(cvp_session_msg, 0);
+ cvp_driver->frame_cache.cache = KMEM_CACHE(msm_cvp_frame, 0);
+ cvp_driver->buf_cache.cache = KMEM_CACHE(cvp_internal_buf, 0);
+ cvp_driver->smem_cache.cache = KMEM_CACHE(msm_cvp_smem, 0);
+ mutex_init(&wncc_buf_pool.lock);
+static void __exit msm_cvp_exit(void)
+ cvp_dsp_device_exit();
+ kmem_cache_destroy(cvp_driver->msg_cache.cache);
+ kmem_cache_destroy(cvp_driver->frame_cache.cache);
+ kmem_cache_destroy(cvp_driver->buf_cache.cache);
+ kmem_cache_destroy(cvp_driver->smem_cache.cache);
+ platform_driver_unregister(&msm_cvp_driver);
+ mutex_destroy(&cvp_driver->lock);
+ mutex_destroy(&wncc_buf_pool.lock);
+module_init(msm_cvp_init);
+module_exit(msm_cvp_exit);
+MODULE_SOFTDEP("pre: msm-mmrm");
+MODULE_SOFTDEP("pre: synx-driver");
+MODULE_SOFTDEP("pre: frpc-adsprpc");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(DMA_BUF);
@@ -0,0 +1,50 @@
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+#ifndef _MSM_COMM_DEF_H_
+#define _MSM_COMM_DEF_H_
+#include <linux/gunyah/gh_rm_drv.h>
+enum op_mode {
+ OP_NORMAL,
+ OP_DRAINING,
+ OP_FLUSH,
+ OP_INVALID,
+enum queue_state {
+ QUEUE_INIT,
+ QUEUE_ACTIVE = 1,
+ QUEUE_START,
+ QUEUE_STOP,
+ QUEUE_INVALID,
+#ifdef CONFIG_EVA_TVM
+#else /* LA target starts here */
+#ifdef CONFIG_EVA_KALAMA
+#define CVP_SYNX_ENABLED 1
+#define CVP_MMRM_ENABLED 1
+#define CVP_FASTRPC_ENABLED 1
+#endif /* End of CONFIG_EVA_KALAMA */
+#ifdef CONFIG_EVA_PINEAPPLE
+#endif /* End of CONFIG_EVA_PINEAPPLE */
+#ifdef CONFIG_EVA_WAIPIO
+#define CVP_MINIDUMP_ENABLED 1
+#endif /* End CONFIG_EVA_TVM */
@@ -0,0 +1,53 @@
+#include "cvp_core_hfi.h"
+struct cvp_hfi_ops *cvp_hfi_initialize(enum msm_cvp_hfi_type hfi_type,
+ struct msm_cvp_platform_resources *res,
+ hfi_cmd_response_callback callback)
+ struct cvp_hfi_ops *ops_tbl = NULL;
+ ops_tbl = kzalloc(sizeof(struct cvp_hfi_ops), GFP_KERNEL);
+ if (!ops_tbl) {
+ dprintk(CVP_ERR, "%s: failed to allocate ops_tbl\n", __func__);
+ return NULL;
+ rc = cvp_iris_hfi_initialize(ops_tbl, res, callback);
+ dprintk(CVP_ERR, "%s device init failed rc = %d",
+ __func__, rc);
+ goto err_hfi_init;
+ return ops_tbl;
+err_hfi_init:
+ kfree(ops_tbl);
+ return ERR_PTR(rc);
+void cvp_hfi_deinitialize(enum msm_cvp_hfi_type hfi_type,
+ struct cvp_hfi_ops *ops_tbl)
+ dprintk(CVP_ERR, "%s invalid device %pK", __func__, ops_tbl);
+ return;
+ cvp_iris_hfi_delete_device(ops_tbl->hfi_device_data);
@@ -0,0 +1,302 @@
+#ifndef __H_CVP_CORE_HFI_H__
+#define __H_CVP_CORE_HFI_H__
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/soc/qcom/msm_mmrm.h>
+#include "cvp_hfi_helper.h"
+#include "cvp_hfi.h"
+#include "hfi_packetization.h"
+#define HFI_MASK_QHDR_TX_TYPE 0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE 0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000FF
+#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q 0x00
+#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q 0x01
+#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q 0x02
+#define HFI_MASK_QHDR_STATUS 0x000000FF
+#define CVP_IFACEQ_NUMQ 3
+#define CVP_IFACEQ_CMDQ_IDX 0
+#define CVP_IFACEQ_MSGQ_IDX 1
+#define CVP_IFACEQ_DBGQ_IDX 2
+#define CVP_IFACEQ_MAX_BUF_COUNT 50
+#define CVP_IFACE_MAX_PARALLEL_CLNTS 16
+#define CVP_IFACEQ_DFLT_QHDR 0x01010000
+#define CVP_MAX_NAME_LENGTH 64
+#define CVP_MAX_PC_SKIP_COUNT 10
+#define CVP_MAX_SUBCACHES 4
+#define CVP_MAX_SUBCACHE_SIZE 52
+struct cvp_hfi_queue_table_header {
+ u32 qtbl_version;
+ u32 qtbl_size;
+ u32 qtbl_qhdr0_offset;
+ u32 qtbl_qhdr_size;
+ u32 qtbl_num_q;
+ u32 qtbl_num_active_q;
+ void *device_addr;
+ char name[256];
+struct cvp_hfi_queue_header {
+ u32 qhdr_status;
+ u32 qhdr_start_addr;
+ u32 qhdr_type;
+ u32 qhdr_q_size;
+ u32 qhdr_pkt_size;
+ u32 qhdr_pkt_drop_cnt;
+ u32 qhdr_rx_wm;
+ u32 qhdr_tx_wm;
+ u32 qhdr_rx_req;
+ u32 qhdr_tx_req;
+ u32 qhdr_rx_irq_status;
+ u32 qhdr_tx_irq_status;
+ u32 qhdr_read_idx;
+ u32 qhdr_write_idx;
+struct cvp_hfi_mem_map_table {
+ u32 mem_map_num_entries;
+ u32 mem_map_table_base_addr;
+struct cvp_hfi_mem_map {
+ u32 virtual_addr;
+ u32 physical_addr;
+ u32 size;
+ u32 attr;
+#define CVP_IFACEQ_TABLE_SIZE (sizeof(struct cvp_hfi_queue_table_header) \
+ + sizeof(struct cvp_hfi_queue_header) * CVP_IFACEQ_NUMQ)
+#define CVP_IFACEQ_QUEUE_SIZE (CVP_IFACEQ_MAX_PKT_SIZE * \
+ CVP_IFACEQ_MAX_BUF_COUNT * CVP_IFACE_MAX_PARALLEL_CLNTS)
+#define CVP_IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
+ (void *)((ptr + sizeof(struct cvp_hfi_queue_table_header)) + \
+ (i * sizeof(struct cvp_hfi_queue_header)))
+#define QDSS_SIZE 4096
+#define SFR_SIZE 1048576
+#define QUEUE_SIZE (CVP_IFACEQ_TABLE_SIZE + \
+ (CVP_IFACEQ_QUEUE_SIZE * CVP_IFACEQ_NUMQ))
+#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
+#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
+#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
+#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
+ ALIGNED_QDSS_SIZE, SZ_1M)
+struct cvp_mem_addr {
+ u32 align_device_addr;
+ u8 *align_virtual_addr;
+ u32 mem_size;
+ struct msm_cvp_smem mem_data;
+struct cvp_iface_q_info {
+ spinlock_t hfi_lock;
+ void *q_hdr;
+ struct cvp_mem_addr q_array;
+ * These are helper macros to iterate over various lists within
+ * iris_hfi_device->res. The intention is to cut down on a lot of boiler-plate
+ * code
+/* Read as "for each 'thing' in a set of 'thingies'" */
+#define iris_hfi_for_each_thing(__device, __thing, __thingy) \
+ iris_hfi_for_each_thing_continue(__device, __thing, __thingy, 0)
+#define iris_hfi_for_each_thing_reverse(__device, __thing, __thingy) \
+ iris_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+ (__device)->res->__thingy##_set.count - 1)
+/* TODO: the __from parameter technically not required since we can figure it
+ * out with some pointer magic (i.e. __thing - __thing##_tbl[0]). If this macro
+ * sees extensive use, probably worth cleaning it up but for now omitting it
+ * since it introduces unnecessary complexity.
+#define iris_hfi_for_each_thing_continue(__device, __thing, __thingy, __from) \
+ for (__thing = &(__device)->res->\
+ __thingy##_set.__thingy##_tbl[__from]; \
+ __thing < &(__device)->res->__thingy##_set.__thingy##_tbl[0] + \
+ ((__device)->res->__thingy##_set.count - __from); \
+ ++__thing)
+#define iris_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
+ __from) \
+ __thing >= &(__device)->res->__thingy##_set.__thingy##_tbl[0]; \
+ --__thing)
+/* Regular set helpers */
+#define iris_hfi_for_each_regulator(__device, __rinfo) \
+ iris_hfi_for_each_thing(__device, __rinfo, regulator)
+#define iris_hfi_for_each_regulator_reverse(__device, __rinfo) \
+ iris_hfi_for_each_thing_reverse(__device, __rinfo, regulator)
+#define iris_hfi_for_each_regulator_reverse_continue(__device, __rinfo, \
+ iris_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
+ regulator, __from)
+/* Clock set helpers */
+#define iris_hfi_for_each_clock(__device, __cinfo) \
+ iris_hfi_for_each_thing(__device, __cinfo, clock)
+#define iris_hfi_for_each_clock_reverse(__device, __cinfo) \
+ iris_hfi_for_each_thing_reverse(__device, __cinfo, clock)
+#define iris_hfi_for_each_clock_reverse_continue(__device, __rinfo, \
+ clock, __from)
+/* reset set helpers */
+#define iris_hfi_for_each_reset_clock(__device, __resetinfo) \
+ iris_hfi_for_each_thing(__device, __resetinfo, reset)
+#define iris_hfi_for_each_reset_clock_reverse(__device, __resetinfo) \
+ iris_hfi_for_each_thing_reverse(__device, __resetinfo, reset)
+/* Bus set helpers */
+#define iris_hfi_for_each_bus(__device, __binfo) \
+ iris_hfi_for_each_thing(__device, __binfo, bus)
+#define iris_hfi_for_each_bus_reverse(__device, __binfo) \
+ iris_hfi_for_each_thing_reverse(__device, __binfo, bus)
+/* Subcache set helpers */
+#define iris_hfi_for_each_subcache(__device, __sinfo) \
+ iris_hfi_for_each_thing(__device, __sinfo, subcache)
+#define iris_hfi_for_each_subcache_reverse(__device, __sinfo) \
+ iris_hfi_for_each_thing_reverse(__device, __sinfo, subcache)
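As a usage illustration for the iterator helpers above (hedged: struct clock_info and its name/clk members live in this driver's resource definitions and are assumed here, not shown in this hunk):

    /*
     *  struct clock_info *cl = NULL;
     *
     *  iris_hfi_for_each_clock(device, cl)
     *      dprintk(CVP_PWR, "clock %s runs at %lu Hz\n",
     *          cl->name, clk_get_rate(cl->clk));
     */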
+#define call_iris_op(d, op, args...) \
+ (((d) && (d)->hal_ops && (d)->hal_ops->op) ? \
+ ((d)->hal_ops->op(args)):0)
+struct cvp_hal_data {
+ u32 irq;
+ u32 irq_wd;
+ phys_addr_t firmware_base;
+ u8 __iomem *register_base;
+ u8 __iomem *gcc_reg_base;
+ u32 register_size;
+ u32 gcc_reg_size;
+struct iris_resources {
+ struct msm_cvp_fw fw;
+enum iris_hfi_state {
+ IRIS_STATE_DEINIT = 1,
+ IRIS_STATE_INIT,
+enum reset_state {
+ INIT = 1,
+ ASSERT,
+ DEASSERT,
+/* Indices of hfi queues in hfi queue arrays (iface_queues & dsp_iface_queues) */
+enum hfi_queue_idx {
+ CMD_Q, /* Command queue */
+ MSG_Q, /* Message queue */
+ DEBUG_Q, /* Debug queue */
+ MAX_Q
+struct iris_hfi_device;
+struct cvp_hal_ops {
+ void (*interrupt_init)(struct iris_hfi_device *ptr);
+ void (*setup_dsp_uc_memmap)(struct iris_hfi_device *device);
+ void (*clock_config_on_enable)(struct iris_hfi_device *device);
+ void (*power_off)(struct iris_hfi_device *device);
+ void (*noc_error_info)(struct iris_hfi_device *device);
+ int (*reset_control_assert_name)(struct iris_hfi_device *device, const char *name);
+ int (*reset_control_deassert_name)(struct iris_hfi_device *device, const char *name);
+ int (*reset_control_acquire_name)(struct iris_hfi_device *device, const char *name);
+ int (*reset_control_release_name)(struct iris_hfi_device *device, const char *name);
+struct iris_hfi_device {
+ struct list_head sess_head;
+ u32 version;
+ u32 intr_status;
+ u32 clk_freq;
+ u32 last_packet_type;
+ u32 error;
+ unsigned long clk_bitrate;
+ unsigned long scaled_rate;
+ struct msm_cvp_gov_data bus_vote;
+ bool power_enabled;
+ bool reg_dumped;
+ struct mutex lock;
+ msm_cvp_callback callback;
+ struct cvp_mem_addr iface_q_table;
+ struct cvp_mem_addr dsp_iface_q_table;
+ struct cvp_mem_addr qdss;
+ struct cvp_mem_addr sfr;
+ struct cvp_mem_addr mem_addr;
+ struct cvp_iface_q_info iface_queues[CVP_IFACEQ_NUMQ];
+ struct cvp_iface_q_info dsp_iface_queues[CVP_IFACEQ_NUMQ];
+ struct cvp_hal_data *cvp_hal_data;
+ struct workqueue_struct *cvp_workq;
+ struct workqueue_struct *iris_pm_workq;
+ int spur_count;
+ int reg_count;
+ struct iris_resources resources;
+ struct msm_cvp_platform_resources *res;
+ struct mmrm_client_desc mmrm_desc;
+ struct mmrm_client *mmrm_cvp;
+ enum iris_hfi_state state;
+ struct cvp_hfi_packetization_ops *pkt_ops;
+ enum hfi_packetization_type packetization_type;
+ struct msm_cvp_cb_info *response_pkt;
+ u8 *raw_packet;
+ struct pm_qos_request qos;
+ unsigned int skip_pc_count;
+ struct msm_cvp_capability *sys_init_capabilities;
+ struct cvp_hal_ops *hal_ops;
+irqreturn_t cvp_hfi_isr(int irq, void *dev);
+irqreturn_t iris_hfi_core_work_handler(int irq, void *data);
+irqreturn_t iris_hfi_isr_wd(int irq, void *dev);
+void cvp_iris_hfi_delete_device(void *device);
+int cvp_iris_hfi_initialize(struct cvp_hfi_ops *hdev,
+ hfi_cmd_response_callback callback);
+int load_cvp_fw_impl(struct iris_hfi_device *device);
+int unload_cvp_fw_impl(struct iris_hfi_device *device);
+void cvp_dump_csr(struct iris_hfi_device *dev);
@@ -0,0 +1,314 @@
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+#include <asm/memory.h>
+#include <linux/coresight-stm.h>
+#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/hash.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/reset.h>
+#include "cvp_hfi_io.h"
+#include "cvp_dump.h"
+#ifdef CVP_MINIDUMP_ENABLED
+/* Declare and init the head node of the linked list
+ * for queue va_md dump */
+static LIST_HEAD(head_node_hfi_queue);
+/* Declare and init the head node of the linked list
+ * for debug struct va_md dump */
+static LIST_HEAD(head_node_dbg_struct);
+static int eva_struct_list_notif_handler(struct notifier_block *this,
+ unsigned long event, void *ptr);
+static int eva_hfiq_list_notif_handler(struct notifier_block *this,
+static struct notifier_block eva_struct_list_notif_blk = {
+ .notifier_call = eva_struct_list_notif_handler,
+ .priority = INT_MAX-1,
+static struct notifier_block eva_hfiq_list_notif_blk = {
+ .notifier_call = eva_hfiq_list_notif_handler,
+ .priority = INT_MAX,
+struct list_head *dump_array[CVP_MAX_DUMP] = {
+ [CVP_QUEUE_DUMP] = &head_node_hfi_queue,
+ [CVP_DBG_DUMP] = &head_node_dbg_struct,
+int md_eva_dump(const char* name, u64 virt, u64 phys, u64 size)
+ struct md_region md_entry;
+ if (msm_minidump_enabled()) {
+ dprintk(CVP_INFO, "Minidump is enabled!\n");
+ strlcpy(md_entry.name, name, sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)virt;
+ md_entry.phys_addr = phys;
+ md_entry.size = size;
+ if (msm_minidump_add_region(&md_entry) < 0) {
+ dprintk(CVP_ERR, "Failed to add \"%s\" data in \
+ Minidump\n", name);
+ return 1;
+ dprintk(CVP_INFO,
+ "add region success for \"%s\" with virt addr:\
+ 0x%x, phy addr: 0x%x, size: %d",
+ md_entry.name, md_entry.virt_addr,
+ md_entry.phys_addr, md_entry.size);
+ dprintk(CVP_ERR, "Minidump is NOT enabled!\n");
+void cvp_va_md_register(char* name, void* notf_blk_ptr)
+ struct notifier_block* notf_blk = (struct notifier_block*)notf_blk_ptr;
+ rc = qcom_va_md_register(name, notf_blk);
+ "\"%s\" : qcom_va_md_register failed rc = %d\n",
+ name, rc);
+ dprintk(CVP_INFO, "\"%s\" : eva_queue qcom_va_md_register \
+ success rc = %d\n", name, rc);
+void cvp_register_va_md_region()
+ if (qcom_va_md_enabled()) {
+ cvp_va_md_register("eva_queues", &eva_hfiq_list_notif_blk);
+ cvp_va_md_register("dbg_struct", &eva_struct_list_notif_blk);
+ dprintk(CVP_ERR, "VA_Minidump is NOT enabled!\n");
+void cvp_free_va_md_list(void)
+ struct eva_va_md_queue *cursor, *temp;
+ list_for_each_entry_safe(cursor, temp, &head_node_hfi_queue, list) {
+ list_del(&cursor->list);
+ kfree(cursor);
+ list_for_each_entry_safe(cursor, temp, &head_node_dbg_struct, list) {
+void add_va_node_to_list(enum cvp_dump_type type, void *buff_va, u32 buff_size,
+ const char *region_name, bool copy)
+ struct list_head *head_node;
+ struct eva_va_md_queue *temp_node = NULL;
+ if (type >= CVP_MAX_DUMP)
+ head_node = dump_array[type];
+ /*Creating Node*/
+ temp_node = kzalloc(sizeof(struct eva_va_md_queue), GFP_KERNEL);
+ if (!temp_node) {
+ dprintk(CVP_ERR, "Memory allocation failed for list node\n");
+ INIT_LIST_HEAD(&temp_node->list);
+ temp_node->va_md_buff = buff_va;
+ temp_node->va_md_buff_size = buff_size;
+ strlcpy(temp_node->region_name, region_name,
+ sizeof(temp_node->region_name));
+ temp_node->copy = copy;
+ list_add_tail(&temp_node->list, head_node);
+ "\"%s\" added to buffer list, vaddr: %px size: 0x%x\n",
+ temp_node->region_name, temp_node->va_md_buff,
+ temp_node->va_md_buff_size);
+void add_hfi_queue_to_va_md_list(void *device)
+ struct cvp_iface_q_info *iface_q;
+ struct iris_hfi_device *dev;
+ dev = (struct iris_hfi_device*)device;
+ iface_q = &dev->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+ add_va_node_to_list(CVP_QUEUE_DUMP,
+ iface_q->q_array.align_virtual_addr,
+ iface_q->q_array.mem_size,
+ "eva_cmdq_cpu", false);
+ iface_q = &dev->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+ "eva_msgq_cpu", false);
+ iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+ "eva_cmdq_dsp", false);
+ iface_q = &dev->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+ "eva_msgq_dsp", false);
+void add_queue_header_to_va_md_list(void *device)
+ struct cvp_hfi_queue_header *queue;
+ queue = (struct cvp_hfi_queue_header *)iface_q->q_hdr;
+ add_va_node_to_list(CVP_DBG_DUMP,
+ queue, sizeof(struct cvp_hfi_queue_header),
+ "cvp_hfi_queue_header-cpucmdQ", false);
+ "cvp_hfi_queue_header-cpumsgQ", false);
+ "cvp_hfi_queue_header-dspcmdQ", false);
+ "cvp_hfi_queue_header-dspmsgQ", false);
+ unsigned long event, void *ptr)
+ struct va_md_entry entry;
+ void *temp_data;
+ entry.vaddr = (unsigned long)cursor->va_md_buff;
+ if (cursor->copy) {
+ dprintk(CVP_INFO, "Copying \"%s\"(%d Bytes)\
+ to intermediate buffer\n",
+ cursor->region_name, cursor->va_md_buff_size);
+ temp_data = kzalloc(cursor->va_md_buff_size,
+ GFP_KERNEL);
+ if (temp_data) {
+ memcpy(temp_data, cursor->va_md_buff,
+ cursor->va_md_buff_size);
+ entry.vaddr = (unsigned long)temp_data;
+ entry.size = cursor->va_md_buff_size;
+ strlcpy(entry.owner, cursor->region_name, sizeof(entry.owner));
+ entry.cb = NULL;
+ if (msm_cvp_minidump_enable) {
+ rc = qcom_va_md_add_region(&entry);
+ dprintk(CVP_ERR, "Add region \"failed\" for \
+ \"%s\", vaddr: %px size: 0x%x\n", entry.owner,
+ cursor->va_md_buff, entry.size);
+ dprintk(CVP_INFO, "Add region \"success\" for \
+ return NOTIFY_OK;
+ dprintk(CVP_INFO, "Copying \"%s\"(%d Bytes) to \
+ intermediate buffer\n", cursor->region_name,
+ \"%s\", vaddr: %px size: 0x%x\n",
+ entry.owner, cursor->va_md_buff,
+ entry.size);
@@ -0,0 +1,124 @@
+#ifndef __H_CVP_MINIDUMP_H__
+#define __H_CVP_MINIDUMP_H__
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include "cvp_comm_def.h"
+enum cvp_dump_type {
+ CVP_QUEUE_DUMP,
+ CVP_DBG_DUMP,
+ CVP_MAX_DUMP,
+#define MAX_REGION_NAME_LEN 32
+#define EVAFW_IMAGE_SIZE 7*1024*1024
+#include <soc/qcom/minidump.h>
+ * wrapper for static minidump
+ * @name: Dump will be collected with this name
+ * @virt: Virtual address of the buffer which needs to be dumped
+ * @phys: Physical address of the buffer which needs to be dumped
+ * @size: Size of the buffer which needs to be dumped
+*/
+int md_eva_dump(const char* name, u64 virt, u64 phys, u64 size);
+ * Function to add a dump region to the queue
+ * @type: Type of the list node which needs to be updated
+ * @buff_va: Virtual address of the buffer which needs to be dumped
+ * @buff_size: Size of the buffer which needs to be dumped
+ * @region_name: Dump will be collected with this name
+ * @copy: Flag to indicate if the buffer data needs to be copied
+ * to the intermediate buffer allocated by kzalloc.
+void add_va_node_to_list(enum cvp_dump_type type, void *buff_va,
+ u32 buff_size, const char *region_name, bool copy);
+ * Registers subsystem to minidump driver
+ * @name: Subsystem name which will get registered
+ * @notf_blk_ptr: notifier block pointer.
+ * notifier_call mentioned in this block will be triggered by
+ * minidump driver in case of crash
+void cvp_va_md_register(char *name, void* notf_blk_ptr);
+/* One function where we will register all the regions */
+void cvp_register_va_md_region(void);
+ * Free up the memory allocated for different va_md_list
+ * Do not forget to add code for any new list in this function
+void cvp_free_va_md_list(void);
+/* Adds the HFI queues(both for CPU and DSP) to the global hfi list head*/
+void add_hfi_queue_to_va_md_list(void *device);
+/*Add queue header structures(both for CPU and DSP)
+to the global struct list head*/
+void add_queue_header_to_va_md_list(void *device);
+ * Node structure for VA_MD Linked List
+ * @list: linux kernel list implementation
+ * @va_md_buff: Virtual address of the buffer which needs to be dumped
+ * @va_md_buff_size: Size of the buffer which needs to be dumped
+struct eva_va_md_queue
+ struct list_head list;
+ void *va_md_buff;
+ u32 va_md_buff_size;
+ char region_name[MAX_REGION_NAME_LEN];
+ bool copy;
+#else
+static inline int md_eva_dump(const char* name, u64 virt, u64 phys, u64 size)
+static inline void add_va_node_to_list(enum cvp_dump_type type, void *buff_va,
+ u32 buff_size, const char *region_name, bool copy)
+static inline void cvp_va_md_register(char *name, void* notf_blk_ptr)
+static inline void cvp_register_va_md_region(void)
+static inline void cvp_free_va_md_list(void)
+static inline void add_hfi_queue_to_va_md_list(void *device)
+static inline void add_queue_header_to_va_md_list(void *device)
+#endif /* End of CVP_MINIDUMP_ENABLED */
@@ -0,0 +1,150 @@
+#include <linux/of_address.h>
+#include <linux/firmware.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#define MAX_FIRMWARE_NAME_SIZE 128
+static int __load_fw_to_memory(struct platform_device *pdev,
+ const char *fw_name)
+ const struct firmware *firmware = NULL;
+ char firmware_name[MAX_FIRMWARE_NAME_SIZE] = {0};
+ struct device_node *node = NULL;
+ struct resource res = {0};
+ phys_addr_t phys = 0;
+ size_t res_size = 0;
+ ssize_t fw_size = 0;
+ void *virt = NULL;
+ int pas_id = 0;
+ if (!fw_name || !(*fw_name) || !pdev) {
+ dprintk(CVP_ERR, "%s: Invalid inputs\n", __func__);
+ if (strlen(fw_name) >= MAX_FIRMWARE_NAME_SIZE - 4) {
+ dprintk(CVP_ERR, "%s: Invalid fw name\n", __func__);
+ scnprintf(firmware_name, ARRAY_SIZE(firmware_name), "%s.mbn", fw_name);
+ rc = of_property_read_u32(pdev->dev.of_node, "pas-id", &pas_id);
+ "%s: error %d while reading DT for \"pas-id\"\n",
+ goto exit;
+ node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (!node) {
+ "%s: DT error getting \"memory-region\" property\n",
+ __func__);
+ rc = of_address_to_resource(node, 0, &res);
+ "%s: error %d getting \"memory-region\" resource\n",
+ phys = res.start;
+ res_size = (size_t)resource_size(&res);
+ rc = request_firmware(&firmware, firmware_name, &pdev->dev);
+ dprintk(CVP_ERR, "%s: error %d requesting \"%s\"\n",
+ __func__, rc, firmware_name);
+ fw_size = qcom_mdt_get_size(firmware);
+ if (fw_size < 0 || res_size < (size_t)fw_size) {
+ "%s: Corrupted fw image. Alloc size: %lu, fw size: %ld",
+ __func__, res_size, fw_size);
+ virt = memremap(phys, res_size, MEMREMAP_WC);
+ if (!virt) {
+ rc = -ENOMEM;
+ dprintk(CVP_ERR, "%s: unable to remap firmware memory\n",
+ rc = qcom_mdt_load(&pdev->dev, firmware, firmware_name,
+ pas_id, virt, phys, res_size, NULL);
+ dprintk(CVP_ERR, "%s: error %d loading \"%s\"\n",
+ rc = qcom_scm_pas_auth_and_reset(pas_id);
+ dprintk(CVP_ERR, "%s: error %d authenticating \"%s\"\n",
+ rc = md_eva_dump("evafwdata", (uintptr_t)virt, phys, EVAFW_IMAGE_SIZE);
+ dprintk(CVP_ERR, "%s: error %d in dumping \"%s\"\n",
+ memunmap(virt);
+ release_firmware(firmware);
+ dprintk(CVP_CORE, "%s: firmware \"%s\" loaded successfully\n",
+ __func__, firmware_name);
+ return pas_id;
+exit:
+ if (virt)
+ if (firmware)
+int load_cvp_fw_impl(struct iris_hfi_device *device)
+ if (!device->resources.fw.cookie) {
+ device->resources.fw.cookie =
+ __load_fw_to_memory(device->res->pdev,
+ device->res->fw_name);
+ if (device->resources.fw.cookie <= 0) {
+ dprintk(CVP_ERR, "Failed to download firmware\n");
+ device->resources.fw.cookie = 0;
+int unload_cvp_fw_impl(struct iris_hfi_device *device)
+ qcom_scm_pas_shutdown(device->resources.fw.cookie);
@@ -0,0 +1,5805 @@
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+#include <linux/pm_wakeup.h>
+// ysi - added for debug
+#include <linux/clk/qcom.h>
+#define REG_ADDR_OFFSET_BITMASK 0x000FFFFF
+#define QDSS_IOVA_START 0x80001000
+#define MIN_PAYLOAD_SIZE 3
+struct cvp_tzbsp_memprot {
+ u32 cp_start;
+ u32 cp_size;
+ u32 cp_nonpixel_start;
+ u32 cp_nonpixel_size;
+#define TZBSP_CVP_PAS_ID 26
+/* Poll interval in uS */
+#define POLL_INTERVAL_US 50
+enum tzbsp_subsys_state {
+ TZ_SUBSYS_STATE_SUSPEND = 0,
+ TZ_SUBSYS_STATE_RESUME = 1,
+ TZ_SUBSYS_STATE_RESTORE_THRESHOLD = 2,
+const struct msm_cvp_gov_data CVP_DEFAULT_BUS_VOTE = {
+ .data = NULL,
+ .data_count = 0,
+const int cvp_max_packets = 32;
+static void iris_hfi_pm_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(iris_hfi_pm_work, iris_hfi_pm_handler);
+static inline int __resume(struct iris_hfi_device *device);
+static inline int __suspend(struct iris_hfi_device *device);
+static int __disable_regulator(struct iris_hfi_device *device,
+ const char *name);
+static int __enable_regulator(struct iris_hfi_device *device,
+static void __flush_debug_queue(struct iris_hfi_device *device, u8 *packet);
+static int __initialize_packetization(struct iris_hfi_device *device);
+static struct cvp_hal_session *__get_session(struct iris_hfi_device *device,
+ u32 session_id);
+static bool __is_session_valid(struct iris_hfi_device *device,
+ struct cvp_hal_session *session, const char *func);
+static int __iface_cmdq_write(struct iris_hfi_device *device,
+ void *pkt);
+static int __load_fw(struct iris_hfi_device *device);
+static int __power_on_init(struct iris_hfi_device *device);
+static void __unload_fw(struct iris_hfi_device *device);
+static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state);
+static int __enable_subcaches(struct iris_hfi_device *device);
+static int __set_subcaches(struct iris_hfi_device *device);
+static int __release_subcaches(struct iris_hfi_device *device);
+static int __disable_subcaches(struct iris_hfi_device *device);
+static int __power_collapse(struct iris_hfi_device *device, bool force);
+static int iris_hfi_noc_error_info(void *dev);
+static void interrupt_init_iris2(struct iris_hfi_device *device);
+static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device);
+static void clock_config_on_enable_vpu5(struct iris_hfi_device *device);
+static void power_off_iris2(struct iris_hfi_device *device);
+static int __set_ubwc_config(struct iris_hfi_device *device);
+static void __noc_error_info_iris2(struct iris_hfi_device *device);
+static int __enable_hw_power_collapse(struct iris_hfi_device *device);
+static int __disable_hw_power_collapse(struct iris_hfi_device *device);
+static int __power_off_controller(struct iris_hfi_device *device);
+static int __hwfence_regs_map(struct iris_hfi_device *device);
+static int __hwfence_regs_unmap(struct iris_hfi_device *device);
+static int __reset_control_assert_name(struct iris_hfi_device *device, const char *name);
+static int __reset_control_deassert_name(struct iris_hfi_device *device, const char *name);
+static int __reset_control_acquire(struct iris_hfi_device *device, const char *name);
+static int __reset_control_release(struct iris_hfi_device *device, const char *name);
+static bool __is_ctl_power_on(struct iris_hfi_device *device);
+static void __print_sidebandmanager_regs(struct iris_hfi_device *device);
+static void dump_noc_reg(struct iris_hfi_device *device);
+static struct cvp_hal_ops hal_ops = {
+ .interrupt_init = interrupt_init_iris2,
+ .setup_dsp_uc_memmap = setup_dsp_uc_memmap_vpu5,
+ .clock_config_on_enable = clock_config_on_enable_vpu5,
+ .power_off = power_off_iris2,
+ .noc_error_info = __noc_error_info_iris2,
+ .reset_control_assert_name = __reset_control_assert_name,
+ .reset_control_deassert_name = __reset_control_deassert_name,
+ .reset_control_acquire_name = __reset_control_acquire,
+ .reset_control_release_name = __reset_control_release,
+ * Utility function to enforce some of our assumptions. Spam calls to this
+ * in code hotspots to double-check the assumptions we hold.
+static inline void __strict_check(struct iris_hfi_device *device)
+ msm_cvp_res_handle_fatal_hw_error(device->res,
+ !mutex_is_locked(&device->lock));
+static inline void __set_state(struct iris_hfi_device *device,
+ enum iris_hfi_state state)
+ device->state = state;
+static inline bool __core_in_valid_state(struct iris_hfi_device *device)
+ return device->state != IRIS_STATE_DEINIT;
+static inline bool is_sys_cache_present(struct iris_hfi_device *device)
+ return device->res->sys_cache_present;
+static int cvp_synx_recover(void)
+#ifdef CVP_SYNX_ENABLED
+ return synx_recover(SYNX_CLIENT_EVA_CTX0);
+#endif /* End of CVP_SYNX_ENABLED */
+#define ROW_SIZE 32
+unsigned long long get_aon_time(void)
+ unsigned long long val;
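+ /* Read CNTVCT_EL0, the AArch64 virtual counter-timer, as a raw always-on timestamp */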
+ asm volatile("mrs %0, cntvct_el0" : "=r" (val));
+ return val;
+int get_hfi_version(void)
+ struct iris_hfi_device *hfi;
+ hfi = (struct iris_hfi_device *)core->dev_ops->hfi_device_data;
+ return hfi->version;
+unsigned int get_msg_size(struct cvp_hfi_msg_session_hdr *hdr)
+ struct iris_hfi_device *device;
+ u32 minor_ver;
+ device = core->dev_ops->hfi_device_data;
+ if (!device) {
+ dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+ minor_ver = (device->version & HFI_VERSION_MINOR_MASK) >>
+ HFI_VERSION_MINOR_SHIFT;
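+ /* HFI minor versions below 2 only use the base session header; newer firmware returns an extended header for FD messages */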
+ if (minor_ver < 2)
+ return sizeof(struct cvp_hfi_msg_session_hdr);
+ if (hdr->packet_type == HFI_MSG_SESSION_CVP_FD)
+ return sizeof(struct cvp_hfi_msg_session_hdr_ext);
+unsigned int get_msg_session_id(void *msg)
+ struct cvp_hfi_msg_session_hdr *hdr =
+ (struct cvp_hfi_msg_session_hdr *)msg;
+ return hdr->session_id;
+unsigned int get_msg_errorcode(void *msg)
+ return hdr->error_type;
+int get_msg_opconfigs(void *msg, unsigned int *session_id,
+ unsigned int *error_type, unsigned int *config_id)
+ struct cvp_hfi_msg_session_op_cfg_packet *cfg =
+ (struct cvp_hfi_msg_session_op_cfg_packet *)msg;
+ *session_id = cfg->session_id;
+ *error_type = cfg->error_type;
+ *config_id = cfg->op_conf_id;
+static void __dump_packet(u8 *packet, enum cvp_msg_prio log_level)
+ u32 c = 0, packet_size = *(u32 *)packet;
+ * row must contain enough for 0xdeadbaad * 8 to be converted into
+ * "de ad ba ab " * 8 + '\0'
+ char row[3 * ROW_SIZE];
+ for (c = 0; c * ROW_SIZE < packet_size; ++c) {
+ int bytes_to_read = ((c + 1) * ROW_SIZE > packet_size) ?
+ packet_size % ROW_SIZE : ROW_SIZE;
+ hex_dump_to_buffer(packet + c * ROW_SIZE, bytes_to_read,
+ ROW_SIZE, 4, row, sizeof(row), false);
+ dprintk(log_level, "%s\n", row);
+static int __dsp_suspend(struct iris_hfi_device *device, bool force)
+ int rc;
+ if (msm_cvp_dsp_disable)
+ dprintk(CVP_DSP, "%s: suspend dsp\n", __func__);
+ rc = cvp_dsp_suspend(force);
+ if (rc != -EBUSY)
+ "%s: dsp suspend failed with error %d\n",
+ dprintk(CVP_DSP, "%s: dsp suspended\n", __func__);
+static int __dsp_resume(struct iris_hfi_device *device)
+ dprintk(CVP_DSP, "%s: resume dsp\n", __func__);
+ rc = cvp_dsp_resume();
+ "%s: dsp resume failed with error %d\n",
+ dprintk(CVP_DSP, "%s: dsp resumed\n", __func__);
+static int __dsp_shutdown(struct iris_hfi_device *device)
+ dprintk(CVP_DSP, "%s: shutdown dsp\n", __func__);
+ rc = cvp_dsp_shutdown();
+ "%s: dsp shutdown failed with error %d\n",
+ WARN_ON(1);
+ dprintk(CVP_DSP, "%s: dsp shutdown successful\n", __func__);
+static int __acquire_regulator(struct regulator_info *rinfo,
+ struct iris_hfi_device *device)
+ if (rinfo->has_hw_power_collapse) {
+ /* Acquire XO_RESET to avoid race condition with video */
+ rc = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+ "XO_RESET could not be acquired: skip acquiring the regulator %s from FW\n",
+ rinfo->name);
+ rc = regulator_set_mode(rinfo->regulator,
+ REGULATOR_MODE_NORMAL);
+ * This is somewhat fatal, but nothing we can do
+ * about it. We can't disable the regulator w/o
+ * getting it back under s/w control
+ "Failed to acquire regulator control: %s\n",
+ dprintk(CVP_PWR,
+ "Acquire regulator control from HW: %s\n",
+ /* Release XO_RESET after regulator is enabled. */
+ call_iris_op(device, reset_control_release_name, device, "cvp_xo_reset");
+ if (!regulator_is_enabled(rinfo->regulator)) {
+ dprintk(CVP_WARN, "Regulator is not enabled %s\n",
+ msm_cvp_res_handle_fatal_hw_error(device->res, true);
+static int __hand_off_regulator(struct iris_hfi_device *device, struct regulator_info *rinfo)
+ "XO_RESET could not be acquired: skip hand off the regulator %s to FW\n",
+ REGULATOR_MODE_FAST);
+ "Failed to hand off regulator control: %s\n",
+ "Hand off regulator control to HW: %s\n",
+static int __hand_off_regulators(struct iris_hfi_device *device)
+ struct regulator_info *rinfo;
+ int rc = 0, c = 0;
+ iris_hfi_for_each_regulator(device, rinfo) {
+ rc = __hand_off_regulator(device, rinfo);
+ * If one regulator hand off failed, driver should take
+ * the control for other regulators back.
+ goto err_reg_handoff_failed;
+ c++;
+err_reg_handoff_failed:
+ iris_hfi_for_each_regulator_reverse_continue(device, rinfo, c)
+ __acquire_regulator(rinfo, device);
+static int __take_back_regulators(struct iris_hfi_device *device)
+ rc = __acquire_regulator(rinfo, device);
+ * if one regulator hand off failed, driver should take
+static int __write_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
+ bool *rx_req_is_set)
+ struct cvp_hfi_cmd_session_hdr *cmd_pkt;
+ u32 packet_size_in_words, new_write_idx;
+ u32 empty_space, read_idx, write_idx;
+ u32 *write_ptr;
+ if (!qinfo || !packet) {
+ dprintk(CVP_ERR, "Invalid Params\n");
+ } else if (!qinfo->q_array.align_virtual_addr) {
+ dprintk(CVP_WARN, "Queues have already been freed\n");
+ queue = (struct cvp_hfi_queue_header *) qinfo->q_hdr;
+ if (!queue) {
+ dprintk(CVP_ERR, "queue not present\n");
+ return -ENOENT;
+ cmd_pkt = (struct cvp_hfi_cmd_session_hdr *)packet;
+ if (cmd_pkt->size >= sizeof(struct cvp_hfi_cmd_session_hdr))
+ dprintk(CVP_CMD, "%s: pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
+ __func__, cmd_pkt->packet_type,
+ cmd_pkt->session_id,
+ cmd_pkt->client_data.transaction_id,
+ cmd_pkt->client_data.kdata & (FENCE_BIT - 1));
+ else if (cmd_pkt->size >= 12)
+ dprintk(CVP_CMD, "%s: pkt_type %08x sess_id %08x\n", __func__,
+ cmd_pkt->packet_type, cmd_pkt->session_id);
+ if (msm_cvp_debug & CVP_PKT) {
+ dprintk(CVP_PKT, "%s: %pK\n", __func__, qinfo);
+ __dump_packet(packet, CVP_PKT);
+ packet_size_in_words = (*(u32 *)packet) >> 2;
+ if (!packet_size_in_words || packet_size_in_words >
+ qinfo->q_array.mem_size>>2) {
+ dprintk(CVP_ERR, "Invalid packet size\n");
+ return -ENODATA;
+ spin_lock(&qinfo->hfi_lock);
+ read_idx = queue->qhdr_read_idx;
+ write_idx = queue->qhdr_write_idx;
+ empty_space = (write_idx >= read_idx) ?
+ ((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
+ (read_idx - write_idx);
+ if (empty_space <= packet_size_in_words) {
+ queue->qhdr_tx_req = 1;
+ spin_unlock(&qinfo->hfi_lock);
+ dprintk(CVP_ERR, "Insufficient size (%d) to write (%d)\n",
+ empty_space, packet_size_in_words);
+ return -ENOTEMPTY;
+ queue->qhdr_tx_req = 0;
+ new_write_idx = write_idx + packet_size_in_words;
+ write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+ (write_idx << 2));
+ if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size)) {
+ dprintk(CVP_ERR, "Invalid write index\n");
+ if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
+ memcpy(write_ptr, packet, packet_size_in_words << 2);
+ new_write_idx -= qinfo->q_array.mem_size >> 2;
+ memcpy(write_ptr, packet, (packet_size_in_words -
+ new_write_idx) << 2);
+ memcpy((void *)qinfo->q_array.align_virtual_addr,
+ packet + ((packet_size_in_words - new_write_idx) << 2),
+ new_write_idx << 2);
+ * Memory barrier to make sure packet is written before updating the
+ * write index
+ mb();
+ queue->qhdr_write_idx = new_write_idx;
+ if (rx_req_is_set)
+ *rx_req_is_set = queue->qhdr_rx_req == 1;
+ * Memory barrier to make sure write index is updated before an
+ * interrupt is raised.
+static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
+ u32 *pb_tx_req_is_set)
+ struct cvp_hfi_msg_session_hdr *msg_pkt;
+ u32 packet_size_in_words, new_read_idx;
+ u32 *read_ptr;
+ u32 receive_request = 0;
+ u32 read_idx, write_idx;
+ if (!qinfo || !packet || !pb_tx_req_is_set) {
+ * Memory barrier to make sure data is valid before
+ * reading it
+ dprintk(CVP_ERR, "Queue memory is not allocated\n");
+ * Do not set receive request for debug queue, if set,
+ * Iris generates interrupt for debug messages even
+ * when there is no response message available.
+ * In general debug queue will not become full as it
+ * is being emptied out for every interrupt from Iris.
+ * Iris will generate an interrupt anyway if it is full.
+ if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
+ receive_request = 1;
+ if (read_idx == write_idx) {
+ queue->qhdr_rx_req = receive_request;
+ * mb() to ensure qhdr is updated in main memory
+ * so that iris reads the updated header values
+ *pb_tx_req_is_set = 0;
+ if (write_idx != queue->qhdr_write_idx) {
+ queue->qhdr_rx_req = 0;
+ dprintk(CVP_HFI,
+ "%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
+ receive_request ? "message" : "debug",
+ queue->qhdr_rx_req, queue->qhdr_tx_req,
+ queue->qhdr_read_idx);
+ read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
+ (read_idx << 2));
+ if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+ dprintk(CVP_ERR, "Invalid read index\n");
+ packet_size_in_words = (*read_ptr) >> 2;
+ if (!packet_size_in_words) {
+ dprintk(CVP_ERR, "Zero packet size\n");
+ new_read_idx = read_idx + packet_size_in_words;
+ if (((packet_size_in_words << 2) <= CVP_IFACEQ_VAR_HUGE_PKT_SIZE)
+ && read_idx <= (qinfo->q_array.mem_size >> 2)) {
+ if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
+ memcpy(packet, read_ptr,
+ packet_size_in_words << 2);
+ new_read_idx -= (qinfo->q_array.mem_size >> 2);
+ (packet_size_in_words - new_read_idx) << 2);
+ memcpy(packet + ((packet_size_in_words -
+ new_read_idx) << 2),
+ (u8 *)qinfo->q_array.align_virtual_addr,
+ new_read_idx << 2);
+ "BAD packet received, read_idx: %#x, pkt_size: %d\n",
+ read_idx, packet_size_in_words << 2);
+ dprintk(CVP_WARN, "Dropping this packet\n");
+ new_read_idx = write_idx;
+ rc = -ENODATA;
+ if (new_read_idx != queue->qhdr_write_idx)
+ queue->qhdr_read_idx = new_read_idx;
+ *pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
+ if (!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
+ msg_pkt = (struct cvp_hfi_msg_session_hdr *)packet;
+ dprintk(CVP_CMD, "%s: "
+ "pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
+ __func__, msg_pkt->packet_type,
+ msg_pkt->session_id,
+ msg_pkt->client_data.transaction_id,
+ msg_pkt->client_data.kdata & (FENCE_BIT - 1));
+ if ((msm_cvp_debug & CVP_PKT) &&
+ !(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
+static int __smem_alloc(struct iris_hfi_device *dev, struct cvp_mem_addr *mem,
+ u32 size, u32 align, u32 flags)
+ struct msm_cvp_smem *alloc = &mem->mem_data;
+ if (!dev || !mem || !size) {
+ dprintk(CVP_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
+ alloc->flags = flags;
+ rc = msm_cvp_smem_alloc(size, align, 1, (void *)dev->res, alloc);
+ dprintk(CVP_ERR, "Alloc failed\n");
+ goto fail_smem_alloc;
+ dprintk(CVP_MEM, "%s: ptr = %pK, size = %d\n", __func__,
+ alloc->kvaddr, size);
+ mem->mem_size = alloc->size;
+ mem->align_virtual_addr = alloc->kvaddr;
+ mem->align_device_addr = alloc->device_addr;
+ alloc->pkt_type = 0;
+ alloc->buf_idx = 0;
+fail_smem_alloc:
+static void __smem_free(struct iris_hfi_device *dev, struct msm_cvp_smem *mem)
+ if (!dev || !mem) {
+ dprintk(CVP_ERR, "invalid param %pK %pK\n", dev, mem);
+ msm_cvp_smem_free(mem);
+static void __write_register(struct iris_hfi_device *device,
+ u32 reg, u32 value)
+ u32 hwiosymaddr = reg;
+ u8 *base_addr;
+ dprintk(CVP_ERR, "Invalid params: %pK\n", device);
+ __strict_check(device);
+ if (!device->power_enabled) {
+ "HFI Write register failed : Power is OFF\n");
+ base_addr = device->cvp_hal_data->register_base;
+ dprintk(CVP_REG, "Base addr: %pK, written to: %#x, Value: %#x...\n",
+ base_addr, hwiosymaddr, value);
+ base_addr += hwiosymaddr;
+ writel_relaxed(value, base_addr);
+ * Memory barrier to make sure value is written into the register.
+ wmb();
+static int __read_gcc_register(struct iris_hfi_device *device, u32 reg)
+ "%s HFI Read register failed : Power is OFF\n",
+ base_addr = device->cvp_hal_data->gcc_reg_base;
+ rc = readl_relaxed(base_addr + reg);
+ * Memory barrier to make sure value is read correctly from the
+ * register.
+ rmb();
+ dprintk(CVP_REG,
+ "GCC Base addr: %pK, read from: %#x, value: %#x...\n",
+ base_addr, reg, rc);
+static int __read_register(struct iris_hfi_device *device, u32 reg)
+ "HFI Read register failed : Power is OFF\n");
+ dprintk(CVP_REG, "Base addr: %pK, read from: %#x, value: %#x...\n",
+static bool __is_ctl_power_on(struct iris_hfi_device *device)
+ u32 reg;
+ reg = __read_register(device, CVP_CC_MVS1C_GDSCR);
+ if (!(reg & 0x80000000))
+ return false;
+ reg = __read_register(device, CVP_CC_MVS1C_CBCR);
+ if (reg & 0x80000000)
+ return true;
+static int __set_registers(struct iris_hfi_device *device)
+ struct msm_cvp_platform_data *pdata;
+ struct reg_set *reg_set;
+ int i;
+ if (!device->res) {
+ "device resources null, cannot set registers\n");
+ return -EINVAL;
+ pdata = core->platform_data;
+ reg_set = &device->res->reg_set;
+ for (i = 0; i < reg_set->count; i++) {
+ __write_register(device, reg_set->reg_tbl[i].reg,
+ reg_set->reg_tbl[i].value);
+ dprintk(CVP_REG, "write_reg offset=%x, val=%x\n",
+ reg_set->reg_tbl[i].reg,
+ i = call_iris_op(device, reset_control_acquire_name, device, "cvp_xo_reset");
+ if (i) {
+ dprintk(CVP_WARN, "%s Fail acquire xo_reset\n", __func__);
+ __write_register(device, CVP_CPU_CS_AXI4_QOS,
+ pdata->noc_qos->axi_qos);
+ __write_register(device,
+ CVP_NOC_RGE_PRIORITYLUT_LOW +
+ device->res->qos_noc_rge_niu_offset,
+ pdata->noc_qos->prioritylut_low);
+ CVP_NOC_RGE_PRIORITYLUT_HIGH +
+ pdata->noc_qos->prioritylut_high);
+ CVP_NOC_RGE_URGENCY_LOW +
+ pdata->noc_qos->urgency_low);
+ CVP_NOC_RGE_DANGERLUT_LOW +
+ pdata->noc_qos->dangerlut_low);
+ CVP_NOC_RGE_SAFELUT_LOW +
+ pdata->noc_qos->safelut_low);
+ CVP_NOC_GCE_PRIORITYLUT_LOW +
+ device->res->qos_noc_gce_vadl_tof_niu_offset,
+ CVP_NOC_GCE_PRIORITYLUT_HIGH +
+ CVP_NOC_GCE_URGENCY_LOW +
+ CVP_NOC_GCE_DANGERLUT_LOW +
+ CVP_NOC_GCE_SAFELUT_LOW +
+ CVP_NOC_CDM_PRIORITYLUT_LOW +
+ device->res->qos_noc_cdm_niu_offset,
+ CVP_NOC_CDM_PRIORITYLUT_HIGH +
+ CVP_NOC_CDM_URGENCY_LOW +
+ pdata->noc_qos->urgency_low_ro);
+ CVP_NOC_CDM_DANGERLUT_LOW +
+ CVP_NOC_CDM_SAFELUT_LOW +
+ /* Below register writes were moved from FW to SW to enable UBWC */
+ CVP_NOC_RGE_NIU_DECCTL_LOW +
+ 0x1);
+ CVP_NOC_RGE_NIU_ENCCTL_LOW +
+ CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW +
+ CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW +
+ CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS +
+ device->res->noc_core_err_offset,
+ 0x3);
+ CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW +
+ device->res->noc_main_sidebandmanager_offset,
+ * The existence of this function is a hack for 8996 (or certain Iris versions)
+ * to overcome a hardware bug. Whenever the GDSCs momentarily power collapse
+ * (after calling __hand_off_regulators()), the values of the threshold
+ * registers (typically programmed by TZ) are incorrectly reset. As a result,
+ * we reprogram these registers at certain agreed-upon points.
+static void __set_threshold_registers(struct iris_hfi_device *device)
+ u32 version = __read_register(device, CVP_WRAPPER_HW_VERSION);
+ version &= ~GENMASK(15, 0);
+ if (version != (0x3 << 28 | 0x43 << 16))
+ if (__tzbsp_set_cvp_state(TZ_SUBSYS_STATE_RESTORE_THRESHOLD))
+ dprintk(CVP_ERR, "Failed to restore threshold values\n");
+static int __unvote_buses(struct iris_hfi_device *device)
+ struct bus_info *bus = NULL;
+ kfree(device->bus_vote.data);
+ device->bus_vote.data = NULL;
+ device->bus_vote.data_count = 0;
+ iris_hfi_for_each_bus(device, bus) {
+ rc = cvp_set_bw(bus, 0);
+ "%s: Failed unvoting bus\n", __func__);
+ goto err_unknown_device;
+err_unknown_device:
+static int __vote_buses(struct iris_hfi_device *device,
+ struct cvp_bus_vote_data *data, int num_data)
+ struct cvp_bus_vote_data *new_data = NULL;
+ if (!num_data) {
+ dprintk(CVP_PWR, "No vote data available\n");
+ goto no_data_count;
+ } else if (!data) {
+ dprintk(CVP_ERR, "Invalid voting data\n");
+ new_data = kmemdup(data, num_data * sizeof(*new_data), GFP_KERNEL);
+ if (!new_data) {
+ dprintk(CVP_ERR, "Can't alloc memory to cache bus votes\n");
+ goto err_no_mem;
+no_data_count:
+ device->bus_vote.data = new_data;
+ device->bus_vote.data_count = num_data;
+ if (bus) {
+ rc = cvp_set_bw(bus, bus->range[1]);
+ "Failed voting bus %s to ab %u\n",
+ bus->name, bus->range[1]*1000);
+err_no_mem:
+static int iris_hfi_vote_buses(void *dev, struct bus_info *bus, unsigned long bw)
+ struct iris_hfi_device *device = dev;
+ if (!device)
+ mutex_lock(&device->lock);
+ rc = cvp_set_bw(bus, bw);
+ mutex_unlock(&device->lock);
+static int __core_set_resource(struct iris_hfi_device *device,
+ struct cvp_resource_hdr *resource_hdr, void *resource_value)
+ struct cvp_hfi_cmd_sys_set_resource_packet *pkt;
+ u8 packet[CVP_IFACEQ_VAR_SMALL_PKT_SIZE];
+ if (!device || !resource_hdr || !resource_value) {
+ dprintk(CVP_ERR, "set_res: Invalid Params\n");
+ pkt = (struct cvp_hfi_cmd_sys_set_resource_packet *) packet;
+ rc = call_hfi_pkt_op(device, sys_set_resource,
+ pkt, resource_hdr, resource_value);
+ dprintk(CVP_ERR, "set_res: failed to create packet\n");
+ goto err_create_pkt;
+ rc = __iface_cmdq_write(device, pkt);
+ rc = -ENOTEMPTY;
+err_create_pkt:
+static int __core_release_resource(struct iris_hfi_device *device,
+ struct cvp_resource_hdr *resource_hdr)
+ struct cvp_hfi_cmd_sys_release_resource_packet *pkt;
+ if (!device || !resource_hdr) {
+ dprintk(CVP_ERR, "release_res: Invalid Params\n");
+ pkt = (struct cvp_hfi_cmd_sys_release_resource_packet *) packet;
+ rc = call_hfi_pkt_op(device, sys_release_resource,
+ pkt, resource_hdr);
+ dprintk(CVP_ERR, "release_res: failed to create packet\n");
+static int __tzbsp_set_cvp_state(enum tzbsp_subsys_state state)
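+ /* Ask TrustZone, via an SCM call, to move the CVP subsystem (PAS ID 26) to the requested state */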
+ rc = qcom_scm_set_remote_state(state, TZBSP_CVP_PAS_ID);
+ dprintk(CVP_CORE, "Set state %d, resp %d\n", state, rc);
+ dprintk(CVP_ERR, "Failed qcom_scm_set_remote_state %d\n", rc);
+ * Based on fal10_veto, X2RPMh, core_pwr_on and PWAitMode value, infer
+ * value of xtss_sw_reset. xtss_sw_reset is a TZ register bit. Driver
+ * cannot access it directly.
+ * __boot_firmware(), the caller of this function, has already checked that
+ * "core_pwr_on" == false (the core is powered off), so this function does not
+ * check core_pwr_on and assumes core_pwr_on = false.
+ * fal10_veto = VPU_CPU_CS_X2RPMh[2] |
+ * ( ~VPU_CPU_CS_X2RPMh[1] & core_pwr_on ) |
+ * ( ~VPU_CPU_CS_X2RPMh[0] & ~( xtss_sw_reset | PWaitMode ) ) ;
+static inline void check_tensilica_in_reset(struct iris_hfi_device *device)
+ u32 X2RPMh, fal10_veto, wait_mode;
+ X2RPMh = __read_register(device, CVP_CPU_CS_X2RPMh);
+ X2RPMh = X2RPMh & 0x7;
+ /* wait_mode = 1: Tensilica is in WFI mode (PWaitMode = true) */
+ wait_mode = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+ wait_mode = wait_mode & 0x1;
+ fal10_veto = __read_register(device, CVP_CPU_CS_X2RPMh_STATUS);
+ fal10_veto = fal10_veto & 0x1;
+ dprintk(CVP_WARN, "tensilica reset check %#x %#x %#x\n",
+ X2RPMh, wait_mode, fal10_veto);
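+/*
+ * Firmware boot-stage names, indexed by CTRL_STATUS bits [14:9]; used in
+ * __boot_firmware() to decode a failed firmware boot.
+ */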
+static const char boot_states[0x40][32] = {
+ "NOT INIT",
+ "RST_START",
+ "INIT_MEMCTL",
+ "INTENABLE_RST",
+ "LITBASE_RST",
+ "PREFETCH_EN",
+ "MPU_INIT",
+ "CTRL_INIT_READ",
+ "MEMCTL_L1_FIX",
+ "RESTORE_EXTRA_NW",
+ "CORE_RESTORE",
+ "COLD_BOOT",
+ "DISABLE_CACHE",
+ "BEFORE_MPU_C",
+ "RET_MPU_C",
+ "IN_MPU_C",
+ "IN_MPU_DEFAULT",
+ "IN_MPU_SYNX",
+ "UCR_SIZE_FAIL",
+ "UCR_ADDR_FAIL",
+ "UCR1_SIZE_FAIL",
+ "UCR1_ADDR_FAIL",
+ "UCR_OVERLAPPED_UCR1",
+ "UCR1_OVERLAPPED_UCR",
+ "UCR_EQ_UCR1",
+ "MPU_CHECK_DONE",
+ "BEFORE_INT_LOCK",
+ "AFTER_INT_LOCK",
+ "BEFORE_INT_UNLOCK",
+ "AFTER_INT_UNLOCK",
+ "CALL_START",
+ "MAIN_ENTRY",
+ "VENUS_INIT_ENTRY",
+ "VSYS_INIT_ENTRY",
+ "BEFORE_XOS_CLK",
+ "AFTER_XOS_CLK",
+ "LOG_MUTEX_INIT",
+ "CREATE_FRAMEWORK_ENTRY",
+ "DTG_INIT",
+ "IDLE_TASK_INIT",
+ "VENUS_CORE_INIT",
+ "HW_CORES_INIT",
+ "RST_THREAD_INIT",
+ "HOST_THREAD_INIT",
+ "ALL_THREADS_INIT",
+ "TASK_MEMPOOL",
+ "SESSION_MUTEX",
+ "SIGNALS_INIT",
+ "RST_SIGNAL_INIT",
+ "INTR_EN_HOST",
+ "INTR_REG_HOST",
+ "INTR_EN_DSP",
+ "INTR_REG_DSP",
+ "X2HSOFTINTEN",
+ "H2XSOFTINTEN",
+ "CPU2DSPINTEN",
+ "DSP2CPUINT_SWRESET",
+ "THREADS_START",
+ "RST_THREAD_START",
+ "HST_THREAD_START",
+ "HST_THREAD_ENTRY"
+static inline int __boot_firmware(struct iris_hfi_device *device)
+ int rc = 0, loop = 10;
+ u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 5000;
+ u32 reg_gdsc;
+ * Hand off control of regulators to h/w _after_ enabling clocks.
+ * Note that the GDSC will turn off when switching from normal
+ * (s/w triggered) to fast (HW triggered) unless the h/w vote is
+ * present. Since Iris isn't up yet, the GDSC will be off briefly.
+ if (__enable_hw_power_collapse(device))
+ dprintk(CVP_ERR, "Failed to enabled inter-frame PC\n");
+ if (!msm_cvp_fw_low_power_mode)
+ goto skip_core_power_check;
+ while (loop) {
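+ /* CVP_CC_MVS1_GDSCR bit 31 is the GDSC power-on status; wait for the core GDSC to power down before booting in low-power mode */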
+ reg_gdsc = __read_register(device, CVP_CC_MVS1_GDSCR);
+ if (reg_gdsc & 0x80000000) {
+ usleep_range(100, 200);
+ loop--;
+ break;
+ if (!loop)
+ dprintk(CVP_ERR, "fail to power off CORE during resume\n");
+skip_core_power_check:
+ ctrl_init_val = BIT(0);
+ /* RUMI: CVP_CTRL_INIT in MPTest has bits 0 and 3 set */
+ __write_register(device, CVP_CTRL_INIT, ctrl_init_val);
+ while (!(ctrl_status & CVP_CTRL_INIT_STATUS__M) && count < max_tries) {
+ ctrl_status = __read_register(device, CVP_CTRL_STATUS);
+ if ((ctrl_status & CVP_CTRL_ERROR_STATUS__M) == 0x4) {
+ dprintk(CVP_ERR, "invalid setting for UC_REGION\n");
+ /* Reduce to 50, 100 on silicon */
+ usleep_range(50, 100);
+ count++;
+ if (!(ctrl_status & CVP_CTRL_INIT_STATUS__M)) {
+ ctrl_init_val = __read_register(device, CVP_CTRL_INIT);
+ "Failed to boot FW status: %x %x %s\n",
+ ctrl_status, ctrl_init_val,
+ boot_states[(ctrl_status >> 9) & 0x3f]);
+ check_tensilica_in_reset(device);
+ rc = -ENODEV;
+ /* Enable interrupt before sending commands to tensilica */
+ __write_register(device, CVP_CPU_CS_H2XSOFTINTEN, 0x1);
+ __write_register(device, CVP_CPU_CS_X2RPMh, 0x0);
+static int iris_hfi_resume(void *dev)
+ struct iris_hfi_device *device = (struct iris_hfi_device *) dev;
+ dprintk(CVP_ERR, "%s invalid device\n", __func__);
+ dprintk(CVP_CORE, "Resuming Iris\n");
+ rc = __resume(device);
+static int iris_hfi_suspend(void *dev)
+ } else if (!device->res->sw_power_collapsible) {
+ return -ENOTSUPP;
+ dprintk(CVP_CORE, "Suspending Iris\n");
+ rc = __power_collapse(device, true);
+ dprintk(CVP_WARN, "%s: Iris is busy\n", __func__);
+ rc = -EBUSY;
+ /* Cancel pending delayed works if any */
+ if (!rc)
+ cancel_delayed_work(&iris_hfi_pm_work);
+void cvp_dump_csr(struct iris_hfi_device *dev)
+ if (!dev)
+ if (!dev->power_enabled || dev->reg_dumped)
+ reg = __read_register(dev, CVP_WRAPPER_CPU_STATUS);
+ dprintk(CVP_ERR, "CVP_WRAPPER_CPU_STATUS: %x\n", reg);
+ reg = __read_register(dev, CVP_CPU_CS_SCIACMDARG0);
+ dprintk(CVP_ERR, "CVP_CPU_CS_SCIACMDARG0: %x\n", reg);
+ //reg = __read_register(dev, CVP_WRAPPER_INTR_STATUS);
+ //dprintk(CVP_ERR, "CVP_WRAPPER_INTR_STATUS: %x\n", reg);
+ //reg = __read_register(dev, CVP_CPU_CS_H2ASOFTINT);
+ //dprintk(CVP_ERR, "CVP_CPU_CS_H2ASOFTINT: %x\n", reg);
+ reg = __read_register(dev, CVP_CPU_CS_A2HSOFTINT);
+ dprintk(CVP_ERR, "CVP_CPU_CS_A2HSOFTINT: %x\n", reg);
+ reg = __read_register(dev, CVP_CC_MVS1C_GDSCR);
+ dprintk(CVP_ERR, "CVP_CC_MVS1C_GDSCR: %x\n", reg);
+ reg = __read_register(dev, CVP_CC_MVS1C_CBCR);
+ dprintk(CVP_ERR, "CVP_CC_MVS1C_CBCR: %x\n", reg);
+ reg = __read_register(dev, CVP_WRAPPER_CPU_CLOCK_CONFIG);
+ dprintk(CVP_ERR, "CVP_WRAPPER_CPU_CLOCK_CONFIG: %x\n", reg);
+ reg = __read_register(dev, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+ dprintk(CVP_ERR, "CVP_WRAPPER_CORE_CLOCK_CONFIG: %x\n", reg);
+ dump_noc_reg(dev);
+ dev->reg_dumped = true;
+static int iris_hfi_flush_debug_queue(void *dev)
+ dprintk(CVP_WARN, "%s: iris power off\n", __func__);
+ cvp_dump_csr(device);
+ __flush_debug_queue(device, NULL);
+static int iris_hfi_scale_clocks(void *dev, u32 freq)
+ dprintk(CVP_ERR, "Invalid args: %pK\n", device);
+ if (__resume(device)) {
+ dprintk(CVP_ERR, "Resume from power collapse failed\n");
+ rc = msm_cvp_set_clocks_impl(device, freq);
+/* Writes into cmdq without raising an interrupt */
+static int __iface_cmdq_write_relaxed(struct iris_hfi_device *device,
+ void *pkt, bool *requires_interrupt)
+ struct cvp_iface_q_info *q_info;
+ struct cvp_hal_cmd_pkt_hdr *cmd_packet;
+ int result = -E2BIG;
+ if (!device || !pkt) {
+ if (!__core_in_valid_state(device)) {
+ dprintk(CVP_ERR, "%s - fw not in init state\n", __func__);
+ result = -EINVAL;
+ goto err_q_null;
+ cmd_packet = (struct cvp_hal_cmd_pkt_hdr *)pkt;
+ device->last_packet_type = cmd_packet->packet_type;
+ q_info = &device->iface_queues[CVP_IFACEQ_CMDQ_IDX];
+ if (!q_info) {
+ dprintk(CVP_ERR, "cannot write to shared Q's\n");
+ if (!q_info->q_array.align_virtual_addr) {
+ dprintk(CVP_ERR, "cannot write to shared CMD Q's\n");
+ result = -ENODATA;
+ dprintk(CVP_ERR, "%s: Power on failed\n", __func__);
+ goto err_q_write;
+ if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
+ if (device->res->sw_power_collapsible) {
+ if (!queue_delayed_work(device->iris_pm_workq,
+ &iris_hfi_pm_work,
+ msecs_to_jiffies(
+ device->res->msm_cvp_pwr_collapse_delay))) {
+ "PM work already scheduled\n");
+ result = 0;
+ dprintk(CVP_ERR, "__iface_cmdq_write: queue full\n");
+err_q_write:
+err_q_null:
+ return result;
+static int __iface_cmdq_write(struct iris_hfi_device *device, void *pkt)
+ bool needs_interrupt = false;
+ int rc = __iface_cmdq_write_relaxed(device, pkt, &needs_interrupt);
+ if (!rc && needs_interrupt) {
+ /* Consumer of cmdq prefers that we raise an interrupt */
+ if (!__is_ctl_power_on(device))
+ dprintk(CVP_ERR, "%s power off, don't access reg\n", __func__);
+ dprintk(CVP_WARN, "%s Fail acquire xo_reset at %d\n", __func__, __LINE__);
+ __write_register(device, CVP_CPU_CS_H2ASOFTINT, 1);
+static int __iface_msgq_read(struct iris_hfi_device *device, void *pkt)
+ u32 tx_req_is_set = 0;
+ if (!pkt) {
+ dprintk(CVP_WARN, "%s - fw not in init state\n", __func__);
+ goto read_error_null;
+ q_info = &device->iface_queues[CVP_IFACEQ_MSGQ_IDX];
+ if (q_info->q_array.align_virtual_addr == NULL) {
+ dprintk(CVP_ERR, "cannot read from shared MSG Q's\n");
+ if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
+ if (tx_req_is_set) {
+ i = call_iris_op(device, reset_control_acquire_name, device,
+ "cvp_xo_reset");
+ dprintk(CVP_WARN, "%s Fail acquire xo_reset at %d\n",
+ __func__, __LINE__);
+ } else
+read_error_null:
+static int __iface_dbgq_read(struct iris_hfi_device *device, void *pkt)
+ q_info = &device->iface_queues[CVP_IFACEQ_DBGQ_IDX];
+ dprintk(CVP_ERR, "cannot read from shared DBG Q's\n");
+ goto dbg_error_null;
+dbg_error_null:
+static void __set_queue_hdr_defaults(struct cvp_hfi_queue_header *q_hdr)
+ q_hdr->qhdr_status = 0x1;
+ q_hdr->qhdr_type = CVP_IFACEQ_DFLT_QHDR;
+ q_hdr->qhdr_q_size = CVP_IFACEQ_QUEUE_SIZE / 4;
+ q_hdr->qhdr_pkt_size = 0;
+ q_hdr->qhdr_rx_wm = 0x1;
+ q_hdr->qhdr_tx_wm = 0x1;
+ q_hdr->qhdr_rx_req = 0x1;
+ q_hdr->qhdr_tx_req = 0x0;
+ q_hdr->qhdr_rx_irq_status = 0x0;
+ q_hdr->qhdr_tx_irq_status = 0x0;
+ q_hdr->qhdr_read_idx = 0x0;
+ q_hdr->qhdr_write_idx = 0x0;
+ * Unused, keep for reference
+static void __interface_dsp_queues_release(struct iris_hfi_device *device)
+ struct msm_cvp_smem *mem_data = &device->dsp_iface_q_table.mem_data;
+ struct context_bank_info *cb = mem_data->mapping_info.cb_info;
+ if (!device->dsp_iface_q_table.align_virtual_addr) {
+ dprintk(CVP_ERR, "%s: already released\n", __func__);
+ dma_unmap_single_attrs(cb->dev, mem_data->device_addr,
+ mem_data->size, DMA_BIDIRECTIONAL, 0);
+ dma_free_coherent(device->res->mem_cdsp.dev, mem_data->size,
+ mem_data->kvaddr, mem_data->dma_handle);
+ for (i = 0; i < CVP_IFACEQ_NUMQ; i++) {
+ device->dsp_iface_queues[i].q_hdr = NULL;
+ device->dsp_iface_queues[i].q_array.align_virtual_addr = NULL;
+ device->dsp_iface_queues[i].q_array.align_device_addr = 0;
+ device->dsp_iface_q_table.align_virtual_addr = NULL;
+ device->dsp_iface_q_table.align_device_addr = 0;
+static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
+ u32 i;
+ int offset = 0;
+ phys_addr_t fw_bias = 0;
+ size_t q_size;
+ struct msm_cvp_smem *mem_data;
+ void *kvaddr;
+ dma_addr_t dma_handle;
+ dma_addr_t iova;
+ struct context_bank_info *cb;
+ q_size = ALIGN(QUEUE_SIZE, SZ_1M);
+ mem_data = &dev->dsp_iface_q_table.mem_data;
+ if (mem_data->kvaddr) {
+ memset((void *)mem_data->kvaddr, 0, q_size);
+ cvp_dsp_init_hfi_queue_hdr(dev);
+ /* Allocate dsp queues from CDSP device memory */
+ kvaddr = dma_alloc_coherent(dev->res->mem_cdsp.dev, q_size,
+ &dma_handle, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(kvaddr)) {
+ dprintk(CVP_ERR, "%s: failed dma allocation\n", __func__);
+ goto fail_dma_alloc;
+ cb = msm_cvp_smem_get_context_bank(dev->res, SMEM_CDSP);
+ if (!cb) {
+ "%s: failed to get DSP context bank\n", __func__);
+ goto fail_dma_map;
+ iova = dma_map_single_attrs(cb->dev, phys_to_virt(dma_handle),
+ q_size, DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(cb->dev, iova)) {
+ dprintk(CVP_ERR, "%s: failed dma mapping\n", __func__);
+ dprintk(CVP_DSP,
+ "%s: kvaddr %pK dma_handle %#llx iova %#llx size %zd\n",
+ __func__, kvaddr, dma_handle, iova, q_size);
+ memset(mem_data, 0, sizeof(struct msm_cvp_smem));
+ mem_data->kvaddr = kvaddr;
+ mem_data->device_addr = iova;
+ mem_data->dma_handle = dma_handle;
+ mem_data->size = q_size;
+ mem_data->mapping_info.cb_info = cb;
+ if (!is_iommu_present(dev->res))
+ fw_bias = dev->cvp_hal_data->firmware_base;
+ dev->dsp_iface_q_table.align_virtual_addr = kvaddr;
+ dev->dsp_iface_q_table.align_device_addr = iova - fw_bias;
+ dev->dsp_iface_q_table.mem_size = CVP_IFACEQ_TABLE_SIZE;
+ offset = dev->dsp_iface_q_table.mem_size;
+ iface_q = &dev->dsp_iface_queues[i];
+ iface_q->q_array.align_device_addr = iova + offset - fw_bias;
+ iface_q->q_array.align_virtual_addr = kvaddr + offset;
+ iface_q->q_array.mem_size = CVP_IFACEQ_QUEUE_SIZE;
+ offset += iface_q->q_array.mem_size;
+ spin_lock_init(&iface_q->hfi_lock);
+fail_dma_map:
+ dma_free_coherent(dev->res->mem_cdsp.dev, q_size, kvaddr, dma_handle);
+fail_dma_alloc:
+static void __interface_queues_release(struct iris_hfi_device *device)
+ struct cvp_hfi_mem_map_table *qdss;
+ struct cvp_hfi_mem_map *mem_map;
+ int num_entries = device->res->qdss_addr_set.count;
+ unsigned long mem_map_table_base_addr;
+ if (device->qdss.align_virtual_addr) {
+ qdss = (struct cvp_hfi_mem_map_table *)
+ device->qdss.align_virtual_addr;
+ qdss->mem_map_num_entries = num_entries;
+ mem_map_table_base_addr =
+ device->qdss.align_device_addr +
+ sizeof(struct cvp_hfi_mem_map_table);
+ qdss->mem_map_table_base_addr =
+ (u32)mem_map_table_base_addr;
+ if ((unsigned long)qdss->mem_map_table_base_addr !=
+ mem_map_table_base_addr) {
+ "Invalid mem_map_table_base_addr %#lx",
+ mem_map_table_base_addr);
+ mem_map = (struct cvp_hfi_mem_map *)(qdss + 1);
+ cb = msm_cvp_smem_get_context_bank(device->res, 0);
+ for (i = 0; cb && i < num_entries; i++) {
+ iommu_unmap(cb->domain,
+ mem_map[i].virtual_addr,
+ mem_map[i].size);
+ __smem_free(device, &device->qdss.mem_data);
+ __smem_free(device, &device->iface_q_table.mem_data);
+ __smem_free(device, &device->sfr.mem_data);
+ device->iface_queues[i].q_hdr = NULL;
+ device->iface_queues[i].q_array.align_virtual_addr = NULL;
+ device->iface_queues[i].q_array.align_device_addr = 0;
+ device->iface_q_table.align_virtual_addr = NULL;
+ device->iface_q_table.align_device_addr = 0;
+ device->qdss.align_virtual_addr = NULL;
+ device->qdss.align_device_addr = 0;
+ device->sfr.align_virtual_addr = NULL;
+ device->sfr.align_device_addr = 0;
+ device->mem_addr.align_virtual_addr = NULL;
+ device->mem_addr.align_device_addr = 0;
+static int __get_qdss_iommu_virtual_addr(struct iris_hfi_device *dev,
+ struct cvp_hfi_mem_map *mem_map,
+ struct iommu_domain *domain)
+ dma_addr_t iova = QDSS_IOVA_START;
+ int num_entries = dev->res->qdss_addr_set.count;
+ struct addr_range *qdss_addr_tbl = dev->res->qdss_addr_set.addr_tbl;
+ if (!num_entries)
+ for (i = 0; i < num_entries; i++) {
+ if (domain) {
+ rc = iommu_map(domain, iova,
+ qdss_addr_tbl[i].start,
+ qdss_addr_tbl[i].size,
+ IOMMU_READ | IOMMU_WRITE);
+ "IOMMU QDSS mapping failed for addr %#x\n",
+ qdss_addr_tbl[i].start);
+ iova = qdss_addr_tbl[i].start;
+ mem_map[i].virtual_addr = (u32)iova;
+ mem_map[i].physical_addr = qdss_addr_tbl[i].start;
+ mem_map[i].size = qdss_addr_tbl[i].size;
+ mem_map[i].attr = 0x0;
+ iova += mem_map[i].size;
+ if (i < num_entries) {
+ "QDSS mapping failed, Freeing other entries %d\n", i);
+ for (--i; domain && i >= 0; i--) {
+ iommu_unmap(domain,
+static void __setup_ucregion_memory_map(struct iris_hfi_device *device)
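+ /* Program the shared-memory (UC region) base/size and the queue table, SFR and QDSS addresses into CVP registers so firmware can locate them */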
+ __write_register(device, CVP_UC_REGION_ADDR,
+ (u32)device->iface_q_table.align_device_addr);
+ __write_register(device, CVP_UC_REGION_SIZE, SHARED_QSIZE);
+ __write_register(device, CVP_QTBL_ADDR,
+ __write_register(device, CVP_QTBL_INFO, 0x01);
+ if (device->sfr.align_device_addr)
+ __write_register(device, CVP_SFR_ADDR,
+ (u32)device->sfr.align_device_addr);
+ if (device->qdss.align_device_addr)
+ __write_register(device, CVP_MMAP_ADDR,
+ (u32)device->qdss.align_device_addr);
+ call_iris_op(device, setup_dsp_uc_memmap, device);
+static void __hfi_queue_init(struct iris_hfi_device *dev)
+ int i, offset = 0;
+ struct cvp_hfi_queue_table_header *q_tbl_hdr;
+ struct cvp_hfi_queue_header *q_hdr;
+ offset += dev->iface_q_table.mem_size;
+ iface_q = &dev->iface_queues[i];
+ iface_q->q_array.align_device_addr =
+ dev->iface_q_table.align_device_addr + offset;
+ iface_q->q_array.align_virtual_addr =
+ dev->iface_q_table.align_virtual_addr + offset;
+ iface_q->q_hdr = CVP_IFACEQ_GET_QHDR_START_ADDR(
+ dev->iface_q_table.align_virtual_addr, i);
+ __set_queue_hdr_defaults(iface_q->q_hdr);
+ q_tbl_hdr = (struct cvp_hfi_queue_table_header *)
+ dev->iface_q_table.align_virtual_addr;
+ q_tbl_hdr->qtbl_version = 0;
+ q_tbl_hdr->device_addr = (void *)dev;
+ strlcpy(q_tbl_hdr->name, "msm_cvp", sizeof(q_tbl_hdr->name));
+ q_tbl_hdr->qtbl_size = CVP_IFACEQ_TABLE_SIZE;
+ q_tbl_hdr->qtbl_qhdr0_offset =
+ sizeof(struct cvp_hfi_queue_table_header);
+ q_tbl_hdr->qtbl_qhdr_size = sizeof(struct cvp_hfi_queue_header);
+ q_tbl_hdr->qtbl_num_q = CVP_IFACEQ_NUMQ;
+ q_tbl_hdr->qtbl_num_active_q = CVP_IFACEQ_NUMQ;
+ q_hdr = iface_q->q_hdr;
+ q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
+ q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
+ q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
+ iface_q = &dev->iface_queues[CVP_IFACEQ_DBGQ_IDX];
+ q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
+ * Set receive request to zero on debug queue as there is no
+ * need of interrupt from cvp hardware for debug messages
+ q_hdr->qhdr_rx_req = 0;
+static void __sfr_init(struct iris_hfi_device *dev)
+ struct cvp_hfi_sfr_struct *vsfr;
+ vsfr = (struct cvp_hfi_sfr_struct *) dev->sfr.align_virtual_addr;
+ if (vsfr)
+ vsfr->bufSize = ALIGNED_SFR_SIZE;
+static int __interface_queues_init(struct iris_hfi_device *dev)
+ struct cvp_mem_addr *mem_addr;
+ q_size = SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE;
+ mem_addr = &dev->mem_addr;
+ if (dev->iface_q_table.align_virtual_addr) {
+ memset((void *)dev->iface_q_table.align_virtual_addr,
+ 0, q_size);
+ goto hfi_queue_init;
+ rc = __smem_alloc(dev, mem_addr, q_size, 1, SMEM_UNCACHED);
+ dprintk(CVP_ERR, "iface_q_table_alloc_fail\n");
+ goto fail_alloc_queue;
+ dev->iface_q_table.align_virtual_addr = mem_addr->align_virtual_addr;
+ dev->iface_q_table.align_device_addr = mem_addr->align_device_addr -
+ fw_bias;
+ dev->iface_q_table.mem_size = CVP_IFACEQ_TABLE_SIZE;
+ dev->iface_q_table.mem_data = mem_addr->mem_data;
+hfi_queue_init:
+ __hfi_queue_init(dev);
+ if (dev->sfr.align_virtual_addr) {
+ memset((void *)dev->sfr.align_virtual_addr,
+ 0, ALIGNED_SFR_SIZE);
+ goto sfr_init;
+ rc = __smem_alloc(dev, mem_addr, ALIGNED_SFR_SIZE, 1, SMEM_UNCACHED);
+ dprintk(CVP_WARN, "sfr_alloc_fail: SFR not will work\n");
+ dev->sfr.align_device_addr = 0;
+ dev->sfr.align_device_addr = mem_addr->align_device_addr -
+ dev->sfr.align_virtual_addr = mem_addr->align_virtual_addr;
+ dev->sfr.mem_size = ALIGNED_SFR_SIZE;
+ dev->sfr.mem_data = mem_addr->mem_data;
+sfr_init:
+ __sfr_init(dev);
+ if (dev->qdss.align_virtual_addr)
+ goto dsp_hfi_queue_init;
+ if ((msm_cvp_fw_debug_mode & HFI_DEBUG_MODE_QDSS) && num_entries) {
+ rc = __smem_alloc(dev, mem_addr, ALIGNED_QDSS_SIZE, 1,
+ SMEM_UNCACHED);
+ "qdss_alloc_fail: QDSS messages logging will not work\n");
+ dev->qdss.align_device_addr = 0;
+ dev->qdss.align_device_addr =
+ mem_addr->align_device_addr - fw_bias;
+ dev->qdss.align_virtual_addr =
+ mem_addr->align_virtual_addr;
+ dev->qdss.mem_size = ALIGNED_QDSS_SIZE;
+ dev->qdss.mem_data = mem_addr->mem_data;
+ if (dev->qdss.align_virtual_addr) {
+ qdss =
+ (struct cvp_hfi_mem_map_table *)dev->qdss.align_virtual_addr;
+ mem_map_table_base_addr = dev->qdss.align_device_addr +
+ qdss->mem_map_table_base_addr = mem_map_table_base_addr;
+ cb = msm_cvp_smem_get_context_bank(dev->res, 0);
+ "%s: failed to get context bank\n", __func__);
+ rc = __get_qdss_iommu_virtual_addr(dev, mem_map, cb->domain);
+ "IOMMU mapping failed, Freeing qdss memdata\n");
+ __smem_free(dev, &dev->qdss.mem_data);
+ dev->qdss.align_virtual_addr = NULL;
+dsp_hfi_queue_init:
+ rc = __interface_dsp_queues_init(dev);
+ dprintk(CVP_ERR, "dsp_queues_init failed\n");
+ __setup_ucregion_memory_map(dev);
+fail_alloc_queue:
+static int __sys_set_debug(struct iris_hfi_device *device, u32 debug)
+ struct cvp_hfi_cmd_sys_set_property_packet *pkt =
+ (struct cvp_hfi_cmd_sys_set_property_packet *) &packet;
+ rc = call_hfi_pkt_op(device, sys_debug_config, pkt, debug);
+ "Debug mode setting to FW failed\n");
+ if (__iface_cmdq_write(device, pkt))
+static int __sys_set_idle_indicator(struct iris_hfi_device *device,
+ bool enable)
+ rc = call_hfi_pkt_op(device, sys_set_idle_indicator, pkt, enable);
+static int __sys_set_coverage(struct iris_hfi_device *device, u32 mode)
+ rc = call_hfi_pkt_op(device, sys_coverage_config,
+ pkt, mode);
+ "Coverage mode setting to FW failed\n");
+ if (__iface_cmdq_write(device, pkt)) {
+ dprintk(CVP_WARN, "Failed to send coverage pkt to f/w\n");
+static int __sys_set_power_control(struct iris_hfi_device *device,
+ bool supported = false;
+ supported = true;
+ if (!supported)
+ call_hfi_pkt_op(device, sys_power_control, pkt, enable);
+static void cvp_pm_qos_update(struct iris_hfi_device *device, bool vote_on)
+ u32 latency, off_vote_cnt;
+ int i, err = 0;
+ spin_lock(&device->res->pm_qos.lock);
+ off_vote_cnt = device->res->pm_qos.off_vote_cnt;
+ spin_unlock(&device->res->pm_qos.lock);
+ if (vote_on && off_vote_cnt)
+ latency = vote_on ? device->res->pm_qos.latency_us :
+ PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
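+ /* Voting on applies the platform latency limit to each silver core; voting off restores the default resume latency */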
+ if (device->res->pm_qos.latency_us && device->res->pm_qos.pm_qos_hdls)
+ for (i = 0; i < device->res->pm_qos.silver_count; i++) {
+ if (!cpu_possible(device->res->pm_qos.silver_cores[i]))
+ continue;
+ err = dev_pm_qos_update_request(
+ &device->res->pm_qos.pm_qos_hdls[i],
+ latency);
+ if (err < 0) {
+ if (vote_on) {
+ "pm qos on failed %d\n", err);
+ "pm qos off failed %d\n", err);
+static int iris_pm_qos_update(void *device)
+ dprintk(CVP_ERR, "%s Invalid device\n", __func__);
+ return -ENODEV;
+ dev = device;
+ mutex_lock(&dev->lock);
+ cvp_pm_qos_update(dev, true);
+ mutex_unlock(&dev->lock);
+static int __hwfence_regs_map(struct iris_hfi_device *device)
+ dprintk(CVP_ERR, "%s: fail to get cb\n", __func__);
+ if (device->res->reg_mappings.ipclite_phyaddr != 0) {
+ rc = iommu_map(cb->domain,
+ device->res->reg_mappings.ipclite_iova,
+ device->res->reg_mappings.ipclite_phyaddr,
+ device->res->reg_mappings.ipclite_size,
+ dprintk(CVP_ERR, "map ipclite fail %d %#x %#x %#x\n",
+ rc, device->res->reg_mappings.ipclite_iova,
+ device->res->reg_mappings.ipclite_size);
+ if (device->res->reg_mappings.hwmutex_phyaddr != 0) {
+ device->res->reg_mappings.hwmutex_iova,
+ device->res->reg_mappings.hwmutex_phyaddr,
+ device->res->reg_mappings.hwmutex_size,
+ IOMMU_MMIO | IOMMU_READ | IOMMU_WRITE);
+ dprintk(CVP_ERR, "map hwmutex fail %d %#x %#x %#x\n",
+ rc, device->res->reg_mappings.hwmutex_iova,
+ device->res->reg_mappings.hwmutex_size);
+ if (device->res->reg_mappings.aon_phyaddr != 0) {
+ device->res->reg_mappings.aon_iova,
+ device->res->reg_mappings.aon_phyaddr,
+ device->res->reg_mappings.aon_size,
+ dprintk(CVP_ERR, "map aon fail %d %#x %#x %#x\n",
+ rc, device->res->reg_mappings.aon_iova,
+ device->res->reg_mappings.aon_size);
+ if (device->res->reg_mappings.timer_phyaddr != 0) {
+ device->res->reg_mappings.timer_iova,
+ device->res->reg_mappings.timer_phyaddr,
+ device->res->reg_mappings.timer_size,
+ dprintk(CVP_ERR, "map timer fail %d %#x %#x %#x\n",
+ rc, device->res->reg_mappings.timer_iova,
+ device->res->reg_mappings.timer_size);
+static int __hwfence_regs_unmap(struct iris_hfi_device *device)
+ if (device->res->reg_mappings.ipclite_iova != 0) {
+ if (device->res->reg_mappings.hwmutex_iova != 0) {
+ if (device->res->reg_mappings.aon_iova != 0) {
+ if (device->res->reg_mappings.timer_iova != 0) {
+static int iris_hfi_core_init(void *device)
+ u32 ipcc_iova;
+ struct cvp_hfi_cmd_sys_init_packet pkt;
+ struct cvp_hfi_cmd_sys_get_property_packet version_pkt;
+ dprintk(CVP_ERR, "Invalid device\n");
+ dprintk(CVP_CORE, "Core initializing\n");
+ pm_stay_awake(dev->res->pdev->dev.parent);
+ dev->bus_vote.data =
+ kzalloc(sizeof(struct cvp_bus_vote_data), GFP_KERNEL);
+ if (!dev->bus_vote.data) {
+ dprintk(CVP_ERR, "Bus vote data memory is not allocated\n");
+ dev->bus_vote.data_count = 1;
+ dev->bus_vote.data->power_mode = CVP_POWER_TURBO;
+ __hwfence_regs_map(dev);
+ rc = __power_on_init(dev);
+ dprintk(CVP_ERR, "Failed to power on init EVA\n");
+ goto err_load_fw;
+ rc = cvp_synx_recover();
+ dprintk(CVP_ERR, "Failed to recover synx\n");
+ /* mmrm registration */
+ if (msm_cvp_mmrm_enabled) {
+ rc = msm_cvp_mmrm_register(device);
+ dprintk(CVP_ERR, "Failed to register mmrm client\n");
+ __set_state(dev, IRIS_STATE_INIT);
+ dev->reg_dumped = false;
+ dprintk(CVP_CORE, "Dev_Virt: %pa, Reg_Virt: %pK\n",
+ &dev->cvp_hal_data->firmware_base,
+ dev->cvp_hal_data->register_base);
+ rc = __interface_queues_init(dev);
+ dprintk(CVP_ERR, "failed to init queues\n");
+ cvp_register_va_md_region();
+ // Add node for dev struct
+ add_va_node_to_list(CVP_QUEUE_DUMP, dev,
+ sizeof(struct iris_hfi_device),
+ "iris_hfi_device-dev", false);
+ add_queue_header_to_va_md_list((void*)dev);
+ add_hfi_queue_to_va_md_list((void*)dev);
+ rc = msm_cvp_map_ipcc_regs(&ipcc_iova);
+ if (!rc) {
+ dprintk(CVP_CORE, "IPCC iova 0x%x\n", ipcc_iova);
+ __write_register(dev, CVP_MMAP_ADDR, ipcc_iova);
+ rc = __load_fw(dev);
+ dprintk(CVP_ERR, "Failed to load Iris FW\n");
+ rc = __boot_firmware(dev);
+ dprintk(CVP_ERR, "Failed to start core\n");
+ dev->version = __read_register(dev, CVP_VERSION_INFO);
+ rc = call_hfi_pkt_op(dev, sys_init, &pkt, 0);
+ dprintk(CVP_ERR, "Failed to create sys init pkt\n");
+ if (__iface_cmdq_write(dev, &pkt)) {
+ rc = call_hfi_pkt_op(dev, sys_image_version, &version_pkt);
+ if (rc || __iface_cmdq_write(dev, &version_pkt))
+ dprintk(CVP_WARN, "Failed to send image version pkt to f/w\n");
+ __sys_set_debug(device, msm_cvp_fw_debug);
+ __enable_subcaches(device);
+ __set_subcaches(device);
+ __set_ubwc_config(device);
+ __sys_set_idle_indicator(device, true);
+ if (dev->res->pm_qos.latency_us) {
+ int err = 0;
+ u32 i, cpu;
+ dev->res->pm_qos.pm_qos_hdls = kcalloc(
+ dev->res->pm_qos.silver_count,
+ sizeof(struct dev_pm_qos_request),
+ if (!dev->res->pm_qos.pm_qos_hdls) {
+ dprintk(CVP_WARN, "Failed allocate pm_qos_hdls\n");
+ goto pm_qos_bail;
+ for (i = 0; i < dev->res->pm_qos.silver_count; i++) {
+ cpu = dev->res->pm_qos.silver_cores[i];
+ if (!cpu_possible(cpu))
+ err = dev_pm_qos_add_request(
+ get_cpu_device(cpu),
+ &dev->res->pm_qos.pm_qos_hdls[i],
+ DEV_PM_QOS_RESUME_LATENCY,
+ dev->res->pm_qos.latency_us);
+ if (err < 0)
+ "%s pm_qos_add_req %d failed\n",
+ __func__, i);
+pm_qos_bail:
+ cvp_dsp_send_hfi_queue();
+ pm_relax(dev->res->pdev->dev.parent);
+ dprintk(CVP_CORE, "Core inited successfully\n");
+ __set_state(dev, IRIS_STATE_DEINIT);
+ __unload_fw(dev);
+ if (dev->mmrm_cvp)
+ {
+ msm_cvp_mmrm_deregister(dev);
+err_load_fw:
+ __hwfence_regs_unmap(dev);
+ dprintk(CVP_ERR, "Core init failed\n");
+static int iris_hfi_core_release(void *dev)
+ int rc = 0, i;
+ struct cvp_hal_session *session, *next;
+ struct dev_pm_qos_request *qos_hdl;
+ dprintk(CVP_ERR, "invalid device\n");
+ dprintk(CVP_WARN, "Core releasing\n");
+ if (device->res->pm_qos.latency_us &&
+ device->res->pm_qos.pm_qos_hdls) {
+ qos_hdl = &device->res->pm_qos.pm_qos_hdls[i];
+ if ((qos_hdl != NULL) && dev_pm_qos_request_active(qos_hdl))
+ dev_pm_qos_remove_request(qos_hdl);
+ kfree(device->res->pm_qos.pm_qos_hdls);
+ device->res->pm_qos.pm_qos_hdls = NULL;
+ __resume(device);
+ __set_state(device, IRIS_STATE_DEINIT);
+ rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
+ dprintk(CVP_WARN, "Failed to suspend cvp FW%d\n", rc);
+ __dsp_shutdown(device);
+ __disable_subcaches(device);
+ ipcc_iova = __read_register(device, CVP_MMAP_ADDR);
+ msm_cvp_unmap_ipcc_regs(ipcc_iova);
+ __unload_fw(device);
+ __hwfence_regs_unmap(device);
+ rc = msm_cvp_mmrm_deregister(device);
+ "%s: Failed msm_cvp_mmrm_deregister:%d\n",
+ /* unlink all sessions from device */
+ list_for_each_entry_safe(session, next, &device->sess_head, list) {
+ list_del(&session->list);
+ session->device = NULL;
+ dprintk(CVP_CORE, "Core released successfully\n");
+static void __core_clear_interrupt(struct iris_hfi_device *device)
+ u32 intr_status = 0, mask = 0;
+ intr_status = __read_register(device, CVP_WRAPPER_INTR_STATUS);
+ mask = (CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK | CVP_FATAL_INTR_BMSK);
+ if (intr_status & mask) {
+ device->intr_status |= intr_status;
+ device->reg_count++;
+ dprintk(CVP_CORE,
+ "INTERRUPT for device: %pK: times: %d status: %d\n",
+ device, device->reg_count, intr_status);
+ device->spur_count++;
+ __write_register(device, CVP_CPU_CS_A2HSOFTINTCLR, 1);
+static int iris_hfi_core_trigger_ssr(void *device,
+ enum hal_ssr_trigger_type type)
+ struct cvp_hfi_cmd_sys_test_ssr_packet pkt;
+ cvp_free_va_md_list();
+ if (mutex_trylock(&dev->lock)) {
+ rc = call_hfi_pkt_op(dev, ssr_cmd, type, &pkt);
+ dprintk(CVP_ERR, "%s: failed to create packet\n",
+ if (__iface_cmdq_write(dev, &pkt))
+ return -EAGAIN;
+static void __set_default_sys_properties(struct iris_hfi_device *device)
+ if (__sys_set_debug(device, msm_cvp_fw_debug))
+ dprintk(CVP_WARN, "Setting fw_debug msg ON failed\n");
+ if (__sys_set_power_control(device, msm_cvp_fw_low_power_mode))
+ dprintk(CVP_WARN, "Setting h/w power collapse ON failed\n");
+static void __session_clean(struct cvp_hal_session *session)
+ struct cvp_hal_session *temp, *next;
+ if (!session || !session->device) {
+ dprintk(CVP_WARN, "%s: invalid params\n", __func__);
+ device = session->device;
+ dprintk(CVP_SESS, "deleted the session: %pK\n", session);
+ * session might have been removed from the device list in
+ * core_release, so check and remove if it is in the list
+ list_for_each_entry_safe(temp, next, &device->sess_head, list) {
+ if (session == temp) {
+ /* Poison the session handle with zeros */
+ *session = (struct cvp_hal_session){ {0} };
+ kfree(session);
+static int iris_hfi_session_clean(void *session)
+ struct cvp_hal_session *sess_close;
+ if (!session || session == (void *)0xdeadbeef) {
+ dprintk(CVP_ERR, "Invalid Params %s\n", __func__);
+ sess_close = session;
+ device = sess_close->device;
+ dprintk(CVP_ERR, "Invalid device handle %s\n", __func__);
+ __session_clean(sess_close);
+static int iris_debug_hook(void *device)
+ struct iris_hfi_device *dev = device;
+ u32 val;
+ //__write_register(dev, CVP_WRAPPER_CORE_CLOCK_CONFIG, 0x11);
+ //__write_register(dev, CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG, 0x1);
+ dprintk(CVP_ERR, "Halt Tensilica and core and axi\n");
+ /******* FDU & MPU *****/
+#define CVP0_CVP_SS_FDU_SECURE_ENABLE 0x90
+#define CVP0_CVP_SS_MPU_SECURE_ENABLE 0x94
+#define CVP0_CVP_SS_ARP_THREAD_0_SECURE_ENABLE 0xA0
+#define CVP0_CVP_SS_ARP_THREAD_1_SECURE_ENABLE 0xA4
+#define CVP0_CVP_SS_ARP_THREAD_2_SECURE_ENABLE 0xA8
+#define CVP0_CVP_SS_ARP_THREAD_3_SECURE_ENABLE 0xAC
+ val = __read_register(dev, CVP0_CVP_SS_FDU_SECURE_ENABLE);
+ dprintk(CVP_ERR, "FDU_SECURE_ENABLE %#x\n", val);
+ val = __read_register(dev, CVP0_CVP_SS_MPU_SECURE_ENABLE);
+ dprintk(CVP_ERR, "MPU_SECURE_ENABLE %#x\n", val);
+ val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_0_SECURE_ENABLE);
+ dprintk(CVP_ERR, "ARP_THREAD_0_SECURE_ENABLE %#x\n", val);
+ val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_1_SECURE_ENABLE);
+ dprintk(CVP_ERR, "ARP_THREAD_1_SECURE_ENABLE %#x\n", val);
+ val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_2_SECURE_ENABLE);
+ dprintk(CVP_ERR, "ARP_THREAD_2_SECURE_ENABLE %#x\n", val);
+ val = __read_register(dev, CVP0_CVP_SS_ARP_THREAD_3_SECURE_ENABLE);
+ dprintk(CVP_ERR, "ARP_THREAD_3_SECURE_ENABLE %#x\n", val);
+ if (true)
+ /***** GCE *******
+ * Bit 0 of below register is CDM secure enable for GCE
+ * CDM buffer will be in CB4 if set
+#define CVP_GCE_GCE_SS_CP_CTL 0x51100
+ /* STATUS bit0 && CFG bit 4 of below register set,
+ * expect pixel buffers in CB3,
+ * otherwise in CB0
+ * CFG bit 9:8 b01 -> LMC input in CB3
+ * CFG bit 9:8 b10 -> LMC input in CB4
+#define CVP_GCE0_CP_STATUS 0x51080
+#define CVP_GCE0_BIU_RD_INPUT_IF_SECURITY_CFG 0x52020
+ val = __read_register(dev, CVP_GCE_GCE_SS_CP_CTL);
+ dprintk(CVP_ERR, "CVP_GCE_GCE_SS_CP_CTL %#x\n", val);
+ val = __read_register(dev, CVP_GCE0_CP_STATUS);
+ dprintk(CVP_ERR, "CVP_GCE0_CP_STATUS %#x\n", val);
+ val = __read_register(dev, CVP_GCE0_BIU_RD_INPUT_IF_SECURITY_CFG);
+ dprintk(CVP_ERR, "CVP_GCE0_BIU_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+ /***** RGE *****
+ * Bit 0 of below register is CDM secure enable for RGE
+ * CDM buffer will be in CB4 if set
+#define CVP_RGE0_TOPRGE_CP_CTL 0x31010
+ /* CFG bit 4 && IN bit 0:
+ * if both are set, expect CB3 or CB4 depending on IN 6:4 field
+ * if either is clear, expect CB0
+#define CVP_RGE0_BUS_RD_INPUT_IF_SECURITY_CFG 0x32020
+#define CVP_RGE0_TOPSPARE_IN 0x311F4
+ val = __read_register(dev, CVP_RGE0_TOPRGE_CP_CTL);
+ dprintk(CVP_ERR, "CVP_RGE0_TOPRGE_CP_CTL %#x\n", val);
+ val = __read_register(dev, CVP_RGE0_BUS_RD_INPUT_IF_SECURITY_CFG);
+ dprintk(CVP_ERR, "CVP_RGE0_BUS_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+ val = __read_register(dev, CVP_RGE0_TOPSPARE_IN);
+ dprintk(CVP_ERR, "CVP_RGE0_TOPSPARE_IN %#x\n", val);
+ /****** VADL ******
+ * Bit 0 of below register is CDM secure enable for VADL
+ * CDM buffer will be in CB4 if set
+#define CVP_VADL0_VADL_SS_CP_CTL 0x21010
+ /* Below registers are used the same way as RGE */
+#define CVP_VADL0_BUS_RD_INPUT_IF_SECURITY_CFG 0x22020
+#define CVP_VADL0_SPARE_IN 0x211F4
+ val = __read_register(dev, CVP_VADL0_VADL_SS_CP_CTL);
+ dprintk(CVP_ERR, "CVP_VADL0_VADL_SS_CP_CTL %#x\n", val);
+ val = __read_register(dev, CVP_VADL0_BUS_RD_INPUT_IF_SECURITY_CFG);
+ dprintk(CVP_ERR, "CVP_VADL0_BUS_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+ val = __read_register(dev, CVP_VADL0_SPARE_IN);
+ dprintk(CVP_ERR, "CVP_VADL0_SPARE_IN %#x\n", val);
+ /****** ITOF *****
+ * Below registers are used the same way as RGE
+#define CVP_ITOF0_TOF_SS_CP_CTL 0x41010
+#define CVP_ITOF0_BUS_RD_INPUT_IF_SECURITY_CFG 0x42020
+#define CVP_ITOF0_TOF_SS_SPARE_IN 0x411F4
+ val = __read_register(dev, CVP_ITOF0_TOF_SS_CP_CTL);
+ dprintk(CVP_ERR, "CVP_ITOF0_TOF_SS_CP_CTL %#x\n", val);
+ val = __read_register(dev, CVP_ITOF0_BUS_RD_INPUT_IF_SECURITY_CFG);
+ dprintk(CVP_ERR, "CVP_ITOF0_BUS_RD_INPUT_IF_SECURITY_CFG %#x\n", val);
+ val = __read_register(dev, CVP_ITOF0_TOF_SS_SPARE_IN);
+ dprintk(CVP_ERR, "CVP_ITOF0_TOF_SS_SPARE_IN %#x\n", val);
+static int iris_hfi_session_init(void *device, void *session_id,
+ void **new_session)
+ struct cvp_hfi_cmd_sys_session_init_packet pkt;
+ struct cvp_hal_session *s;
+ if (!device || !new_session) {
+ dprintk(CVP_ERR, "%s - invalid input\n", __func__);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ dprintk(CVP_ERR, "new session fail: Out of memory\n");
+ goto err_session_init_fail;
+ s->session_id = session_id;
+ s->device = dev;
+ dprintk(CVP_SESS,
+ "%s: inst %pK, session %pK\n", __func__, session_id, s);
+ list_add_tail(&s->list, &dev->sess_head);
+ __set_default_sys_properties(device);
+ if (call_hfi_pkt_op(dev, session_init, &pkt, s)) {
+ dprintk(CVP_ERR, "session_init: failed to create packet\n");
+ *new_session = s;
+err_session_init_fail:
+ if (s)
+ __session_clean(s);
+ *new_session = NULL;
+static int __send_session_cmd(struct cvp_hal_session *session, int pkt_type)
+ struct cvp_hal_session_cmd_pkt pkt;
+ struct iris_hfi_device *device = session->device;
+ if (!__is_session_valid(device, session, __func__))
+ return -ECONNRESET;
+ rc = call_hfi_pkt_op(device, session_cmd,
+ &pkt, pkt_type, session);
+ if (rc == -EPERM)
+ dprintk(CVP_ERR, "send session cmd: create pkt failed\n");
+ if (__iface_cmdq_write(session->device, &pkt))
+static int iris_hfi_session_end(void *session)
+ struct cvp_hal_session *sess;
+ if (!session) {
+ sess = session;
+ device = sess->device;
+ dprintk(CVP_ERR, "Invalid session %s\n", __func__);
+ if (msm_cvp_fw_coverage) {
+ if (__sys_set_coverage(sess->device, msm_cvp_fw_coverage))
+ dprintk(CVP_WARN, "Fw_coverage msg ON failed\n");
+ rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_END);
+static int iris_hfi_session_abort(void *sess)
+ struct cvp_hal_session *session = sess;
+ rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT);
+static int iris_hfi_session_set_buffers(void *sess, u32 iova, u32 size)
+ struct cvp_hfi_cmd_session_set_buffers_packet pkt;
+ if (!session || !session->device || !iova || !size) {
+ if (!__is_session_valid(device, session, __func__)) {
+ rc = -ECONNRESET;
+ rc = call_hfi_pkt_op(device, session_set_buffers,
+ &pkt, session, iova, size);
+ dprintk(CVP_ERR, "set buffers: failed to create packet\n");
+static int iris_hfi_session_release_buffers(void *sess)
+ struct cvp_session_release_buffers_packet pkt;
+ if (!session || session == (void *)0xdeadbeef || !session->device) {
+ rc = call_hfi_pkt_op(device, session_release_buffers, &pkt, session);
+ dprintk(CVP_ERR, "release buffers: failed to create packet\n");
+static int iris_hfi_session_send(void *sess,
+ struct eva_kmd_hfi_packet *in_pkt)
+ struct eva_kmd_hfi_packet pkt;
+ dprintk(CVP_ERR, "invalid session");
+ goto err_send_pkt;
+ rc = call_hfi_pkt_op(device, session_send,
+ &pkt, session, in_pkt);
+ "failed to create pkt\n");
+err_send_pkt:
+static int iris_hfi_session_flush(void *sess)
+ rc = __send_session_cmd(session, HFI_CMD_SESSION_CVP_FLUSH);
+static int iris_hfi_session_start(void *sess)
+ rc = __send_session_cmd(session, HFI_CMD_SESSION_EVA_START);
+static int iris_hfi_session_stop(void *sess)
+ rc = __send_session_cmd(session, HFI_CMD_SESSION_EVA_STOP);
+static void __process_fatal_error(
+ struct msm_cvp_cb_cmd_done cmd_done = {0};
+ device->callback(HAL_SYS_ERROR, &cmd_done);
+static int __prepare_pc(struct iris_hfi_device *device)
+ struct cvp_hfi_cmd_sys_pc_prep_packet pkt;
+ rc = call_hfi_pkt_op(device, sys_pc_prep, &pkt);
+ dprintk(CVP_ERR, "Failed to create sys pc prep pkt\n");
+ goto err_pc_prep;
+ if (__iface_cmdq_write(device, &pkt))
+ dprintk(CVP_ERR, "Failed to prepare iris for power off");
+err_pc_prep:
+static void iris_hfi_pm_handler(struct work_struct *work)
+ "Entering %s\n", __func__);
+ * It is ok to check this variable outside the lock since
+ * it is being updated in this context only
+ if (device->skip_pc_count >= CVP_MAX_PC_SKIP_COUNT) {
+ dprintk(CVP_WARN, "Failed to PC for %d times\n",
+ device->skip_pc_count);
+ device->skip_pc_count = 0;
+ __process_fatal_error(device);
+ if (gfa_cv.state == DSP_SUSPEND)
+ rc = __power_collapse(device, false);
+ switch (rc) {
+ case 0:
+ dprintk(CVP_PWR, "%s: power collapse successful!\n",
+ case -EBUSY:
+ dprintk(CVP_PWR, "%s: retry PC as cvp is busy\n", __func__);
+ queue_delayed_work(device->iris_pm_workq,
+ &iris_hfi_pm_work, msecs_to_jiffies(
+ device->res->msm_cvp_pwr_collapse_delay));
+ case -EAGAIN:
+ device->skip_pc_count++;
+ dprintk(CVP_WARN, "%s: retry power collapse (count %d)\n",
+ __func__, device->skip_pc_count);
+ default:
+ dprintk(CVP_ERR, "%s: power collapse failed\n", __func__);
+static int __power_collapse(struct iris_hfi_device *device, bool force)
+ u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
+ int count = 0;
+ const int max_tries = 150;
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ dprintk(CVP_PWR, "%s: Power already disabled\n",
+ rc = __core_in_valid_state(device);
+ "Core is in bad state, Skipping power collapse\n");
+ rc = __dsp_suspend(device, force);
+ if (rc == -EBUSY)
+ goto skip_power_off;
+ __flush_debug_queue(device, device->raw_packet);
+ pc_ready = __read_register(device, CVP_CTRL_STATUS) &
+ CVP_CTRL_STATUS_PC_READY;
+ if (!pc_ready) {
+ wfi_status = __read_register(device,
+ CVP_WRAPPER_CPU_STATUS);
+ idle_status = __read_register(device,
+ CVP_CTRL_STATUS);
+ if (!(wfi_status & BIT(0))) {
+ "Skipping PC as wfi_status (%#x) bit not set\n",
+ wfi_status);
+ if (!(idle_status & BIT(30))) {
+ "Skipping PC as idle_status (%#x) bit not set\n",
+ idle_status);
+ rc = __prepare_pc(device);
+ dprintk(CVP_WARN, "Failed PC %d\n", rc);
+ while (count < max_tries) {
+ pc_ready = __read_register(device,
+ if ((wfi_status & BIT(0)) && (pc_ready &
+ CVP_CTRL_STATUS_PC_READY))
+ usleep_range(150, 250);
+ if (count == max_tries) {
+ "Skip PC. Core is not ready (%#x, %#x)\n",
+ wfi_status, pc_ready);
+ wfi_status = __read_register(device, CVP_WRAPPER_CPU_STATUS);
+ "Skip PC as wfi_status (%#x) bit not set\n",
+ rc = __suspend(device);
+ dprintk(CVP_ERR, "Failed __suspend\n");
+skip_power_off:
+ dprintk(CVP_PWR, "Skip PC(%#x, %#x, %#x)\n",
+ wfi_status, idle_status, pc_ready);
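The collapse-readiness test above boils down to two bits: the CPU must report WFI (bit 0 of CVP_WRAPPER_CPU_STATUS) and the firmware must have raised PC_READY in CVP_CTRL_STATUS. A minimal stand-alone sketch of that predicate, with an assumed placeholder value for CVP_CTRL_STATUS_PC_READY since the register header is not shown here:

/*
 * Illustrative sketch only: the readiness test __power_collapse polls for.
 * EXAMPLE_PC_READY_MASK stands in for CVP_CTRL_STATUS_PC_READY.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_PC_READY_MASK 0x100u	/* placeholder, not the driver constant */

static bool ready_for_power_collapse(uint32_t wfi_status, uint32_t ctrl_status)
{
	/* core must be in WFI and firmware must have signalled PC-ready */
	return (wfi_status & 1u) && (ctrl_status & EXAMPLE_PC_READY_MASK);
}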
+static void __process_sys_error(struct iris_hfi_device *device)
+ struct cvp_hfi_sfr_struct *vsfr = NULL;
+ vsfr = (struct cvp_hfi_sfr_struct *)device->sfr.align_virtual_addr;
+ if (vsfr) {
+ void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
+ * SFR isn't guaranteed to be NULL terminated
+ * since SYS_ERROR indicates that Iris is in the
+ * process of crashing.
+ if (p == NULL)
+ vsfr->rg_data[vsfr->bufSize - 1] = '\0';
+ dprintk(CVP_ERR, "SFR Message from FW: %s\n",
+ vsfr->rg_data);
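A compact sketch of the same defensive handling: because SYS_ERROR means the firmware may have crashed mid-write, the SFR buffer cannot be assumed to be NUL-terminated and is terminated before printing. The helper name and userspace printf are illustrative only:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void print_sfr(char *rg_data, size_t buf_size)
{
	if (!buf_size)
		return;
	if (memchr(rg_data, '\0', buf_size) == NULL)
		rg_data[buf_size - 1] = '\0';	/* force termination */
	printf("SFR Message from FW: %s\n", rg_data);
}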
+static void __flush_debug_queue(struct iris_hfi_device *device, u8 *packet)
+ bool local_packet = false;
+ enum cvp_msg_prio log_level = CVP_FW;
+ dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+ if (!packet) {
+ packet = kzalloc(CVP_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
+ dprintk(CVP_ERR, "In %s() Fail to allocate mem\n",
+ local_packet = true;
+ * Local packet is used when something FATAL occurred.
+ * It is good to print these logs by default.
+ log_level = CVP_ERR;
+#define SKIP_INVALID_PKT(pkt_size, payload_size, pkt_hdr_size) ({ \
+ if (pkt_size < pkt_hdr_size || \
+ payload_size < MIN_PAYLOAD_SIZE || \
+ payload_size > \
+ (pkt_size - pkt_hdr_size + sizeof(u8))) { \
+ dprintk(CVP_ERR, \
+ "%s: invalid msg size - %d\n", \
+ __func__, pkt->msg_size); \
+ continue; \
+ } \
+ })
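The macro above guards against malformed debug packets; the same bounds check written as a plain predicate (MIN_PAYLOAD_SIZE is a driver constant, so a placeholder value is used here):

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MIN_PAYLOAD_SIZE 8u	/* placeholder for MIN_PAYLOAD_SIZE */

static bool dbg_pkt_is_valid(uint32_t pkt_size, uint32_t payload_size,
			     uint32_t pkt_hdr_size)
{
	if (pkt_size < pkt_hdr_size)
		return false;		/* packet smaller than its own header */
	if (payload_size < EXAMPLE_MIN_PAYLOAD_SIZE)
		return false;		/* payload too small to be meaningful */
	/* payload must fit in the space left after the header */
	return payload_size <= pkt_size - pkt_hdr_size + sizeof(uint8_t);
}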
+ while (!__iface_dbgq_read(device, packet)) {
+ struct cvp_hfi_packet_header *pkt =
+ (struct cvp_hfi_packet_header *) packet;
+ if (pkt->size < sizeof(struct cvp_hfi_packet_header)) {
+ dprintk(CVP_ERR, "Invalid pkt size - %s\n",
+ if (pkt->packet_type == HFI_MSG_SYS_DEBUG) {
+ struct cvp_hfi_msg_sys_debug_packet *pkt =
+ (struct cvp_hfi_msg_sys_debug_packet *) packet;
+ SKIP_INVALID_PKT(pkt->size,
+ pkt->msg_size, sizeof(*pkt));
+ * All fw messages start with a newline character. This
+ * causes dprintk to print this message in two lines
+ * in the kernel log. Ignoring the first character
+ * from the message fixes this to print it in a single
+ * line.
+ pkt->rg_msg_data[pkt->msg_size-1] = '\0';
+ dprintk(log_level, "%s", &pkt->rg_msg_data[1]);
+#undef SKIP_INVALID_PKT
+ if (local_packet)
+ kfree(packet);
+ struct cvp_hal_session *session, const char *func)
+ struct cvp_hal_session *temp = NULL;
+ if (!device || !session)
+ goto invalid;
+ list_for_each_entry(temp, &device->sess_head, list)
+ if (session == temp)
+invalid:
+ dprintk(CVP_WARN, "%s: device %pK, invalid session %pK\n",
+ func, device, session);
+ u32 session_id)
+ list_for_each_entry(temp, &device->sess_head, list) {
+ if (session_id == hash32_ptr(temp))
+ return temp;
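__get_session() resolves the 32-bit session_id the firmware echoes back by re-hashing every known session pointer (hash32_ptr() in the kernel). A minimal userspace sketch of that lookup, using a trivial truncating hash and a plain array as stand-ins for the kernel helpers:

#include <stddef.h>
#include <stdint.h>

struct example_session { int dummy; };

static uint32_t example_hash_ptr(const void *p)
{
	return (uint32_t)(uintptr_t)p;	/* stand-in for hash32_ptr() */
}

static struct example_session *find_session(struct example_session **sessions,
					    size_t n, uint32_t session_id)
{
	for (size_t i = 0; i < n; i++)
		if (example_hash_ptr(sessions[i]) == session_id)
			return sessions[i];
	return NULL;	/* unknown id: the response is discarded */
}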
+#define _INVALID_MSG_ "Unrecognized MSG (%#x) session (%pK), discarding\n"
+#define _INVALID_STATE_ "Ignore responses from %d to %d invalid state\n"
+#define _DEVFREQ_FAIL_ "Failed to add devfreq device bus %s governor %s: %d\n"
+static void process_system_msg(struct msm_cvp_cb_info *info,
+ struct iris_hfi_device *device,
+ void *raw_packet)
+ struct cvp_hal_sys_init_done sys_init_done = {0};
+ switch (info->response_type) {
+ case HAL_SYS_ERROR:
+ __process_sys_error(device);
+ case HAL_SYS_RELEASE_RESOURCE_DONE:
+ dprintk(CVP_CORE, "Received SYS_RELEASE_RESOURCE\n");
+ case HAL_SYS_INIT_DONE:
+ dprintk(CVP_CORE, "Received SYS_INIT_DONE\n");
+ sys_init_done.capabilities =
+ device->sys_init_capabilities;
+ cvp_hfi_process_sys_init_done_prop_read(
+ (struct cvp_hfi_msg_sys_init_done_packet *)
+ raw_packet, &sys_init_done);
+ info->response.cmd.data.sys_init_done = sys_init_done;
+static void **get_session_id(struct msm_cvp_cb_info *info)
+ void **session_id = NULL;
+ /* For session-related packets, validate session */
+ case HAL_SESSION_INIT_DONE:
+ case HAL_SESSION_END_DONE:
+ case HAL_SESSION_ABORT_DONE:
+ case HAL_SESSION_START_DONE:
+ case HAL_SESSION_STOP_DONE:
+ case HAL_SESSION_FLUSH_DONE:
+ case HAL_SESSION_SET_BUFFER_DONE:
+ case HAL_SESSION_SUSPEND_DONE:
+ case HAL_SESSION_RESUME_DONE:
+ case HAL_SESSION_SET_PROP_DONE:
+ case HAL_SESSION_GET_PROP_DONE:
+ case HAL_SESSION_RELEASE_BUFFER_DONE:
+ case HAL_SESSION_REGISTER_BUFFER_DONE:
+ case HAL_SESSION_UNREGISTER_BUFFER_DONE:
+ case HAL_SESSION_PROPERTY_INFO:
+ case HAL_SESSION_EVENT_CHANGE:
+ case HAL_SESSION_DUMP_NOTIFY:
+ case HAL_SESSION_ERROR:
+ session_id = &info->response.cmd.session_id;
+ case HAL_RESPONSE_UNUSED:
+ session_id = NULL;
+ return session_id;
+static void print_msg_hdr(void *hdr)
+ struct cvp_hfi_msg_session_hdr *new_hdr =
+ (struct cvp_hfi_msg_session_hdr *)hdr;
+ dprintk(CVP_HFI, "HFI MSG received: %x %x %x %x %x %x %x %#llx\n",
+ new_hdr->size, new_hdr->packet_type,
+ new_hdr->session_id,
+ new_hdr->client_data.transaction_id,
+ new_hdr->client_data.data1,
+ new_hdr->client_data.data2,
+ new_hdr->error_type,
+ new_hdr->client_data.kdata);
+static int __response_handler(struct iris_hfi_device *device)
+ struct msm_cvp_cb_info *packets;
+ int packet_count = 0;
+ u8 *raw_packet = NULL;
+ bool requeue_pm_work = true;
+ if (!device || device->state != IRIS_STATE_INIT)
+ packets = device->response_pkt;
+ raw_packet = device->raw_packet;
+ if (!raw_packet || !packets) {
+ "%s: Invalid args : Res pkt = %pK, Raw pkt = %pK\n",
+ __func__, packets, raw_packet);
+ if (device->intr_status & CVP_FATAL_INTR_BMSK) {
+ if (device->intr_status & CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK)
+ dprintk(CVP_ERR, "Received Xtensa NOC error\n");
+ if (device->intr_status & CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK)
+ dprintk(CVP_ERR, "Received CVP core NOC error\n");
+ /* Bleed the msg queue dry of packets */
+ while (!__iface_msgq_read(device, raw_packet)) {
+ struct msm_cvp_cb_info *info = &packets[packet_count++];
+ (struct cvp_hfi_msg_session_hdr *)raw_packet;
+ print_msg_hdr(hdr);
+ rc = cvp_hfi_process_msg_packet(0, raw_packet, info);
+ "Corrupt/unknown packet found, discarding\n");
+ --packet_count;
+ } else if (info->response_type == HAL_NO_RESP) {
+ /* Process the packet types that we're interested in */
+ process_system_msg(info, device, raw_packet);
+ session_id = get_session_id(info);
+ * hfi_process_msg_packet provides a session_id that's a hashed
+ * value of struct cvp_hal_session, we need to coerce the hashed
+ * value back to a pointer that we can use. Ideally,
+ * hfi_process_msg_packet should take care of this, but it doesn't have
+ * the required information for it.
+ if (session_id) {
+ struct cvp_hal_session *session = NULL;
+ if (upper_32_bits((uintptr_t)*session_id) != 0) {
+ "Upper 32-bits != 0 for sess_id=%pK\n",
+ *session_id);
+ session = __get_session(device,
+ (u32)(uintptr_t)*session_id);
+ dprintk(CVP_ERR, _INVALID_MSG_,
+ info->response_type,
+ *session_id = session->session_id;
+ if (packet_count >= cvp_max_packets) {
+ "Too many packets in message queue!\n");
+ /* do not read packets after sys error packet */
+ if (info->response_type == HAL_SYS_ERROR)
+ if (requeue_pm_work && device->res->sw_power_collapsible) {
+ dprintk(CVP_ERR, "PM work already scheduled\n");
+ __flush_debug_queue(device, raw_packet);
+ return packet_count;
+irqreturn_t iris_hfi_core_work_handler(int irq, void *data)
+ int num_responses = 0, i = 0;
+ static bool warning_on = true;
+ return IRQ_HANDLED;
+ if (warning_on) {
+ dprintk(CVP_WARN, "%s Core not in init state\n",
+ warning_on = false;
+ goto err_no_work;
+ warning_on = true;
+ if (!device->callback) {
+ dprintk(CVP_ERR, "No interrupt callback function: %pK\n",
+ device);
+ dprintk(CVP_ERR, "%s: Power enable failed\n", __func__);
+ __core_clear_interrupt(device);
+ num_responses = __response_handler(device);
+ dprintk(CVP_HFI, "%s:: cvp_driver_debug num_responses = %d ",
+ __func__, num_responses);
+err_no_work:
+ /* Keep the interrupt status before releasing device lock */
+ intr_status = device->intr_status;
+ * Issue the callbacks outside of the locked context to preserve
+ * re-entrancy.
+ for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
+ i < num_responses; ++i) {
+ struct msm_cvp_cb_info *r = &device->response_pkt[i];
+ void *rsp = (void *)&r->response;
+ _INVALID_STATE_, (i + 1), num_responses);
+ dprintk(CVP_HFI, "Processing response %d of %d, type %d\n",
+ (i + 1), num_responses, r->response_type);
+ /* callback = void cvp_handle_cmd_response() */
+ device->callback(r->response_type, rsp);
+ /* We need to re-enable the IRQ that was disabled in the ISR handler */
+ if (!(intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+ enable_irq(device->cvp_hal_data->irq);
+irqreturn_t cvp_hfi_isr(int irq, void *dev)
+ disable_irq_nosync(irq);
+ return IRQ_WAKE_THREAD;
+static void iris_hfi_wd_work_handler(struct work_struct *work)
+ struct msm_cvp_cb_cmd_done response = {0};
+ enum hal_command_response cmd = HAL_SYS_WATCHDOG_TIMEOUT;
+ if (msm_cvp_hw_wd_recovery) {
+ dprintk(CVP_ERR, "Cleaning up as HW WD recovery is enable %d\n",
+ msm_cvp_hw_wd_recovery);
+ __print_sidebandmanager_regs(device);
+ response.device_id = 0;
+ handle_sys_error(cmd, (void *) &response);
+ enable_irq(device->cvp_hal_data->irq_wd);
+ else {
+ dprintk(CVP_ERR, "Crashing the device as HW WD recovery is disable %d\n",
+ BUG_ON(1);
+static DECLARE_WORK(iris_hfi_wd_work, iris_hfi_wd_work_handler);
+irqreturn_t iris_hfi_isr_wd(int irq, void *dev)
+ dprintk(CVP_ERR, "Got HW WDOG IRQ at %llu! \n", get_aon_time());
+ queue_work(device->cvp_workq, &iris_hfi_wd_work);
+static int __init_reset_clk(struct msm_cvp_platform_resources *res,
+ int reset_index)
+ struct reset_control *rst;
+ struct reset_info *rst_info;
+ struct reset_set *rst_set = &res->reset_set;
+ if (!rst_set->reset_tbl)
+ rst_info = &rst_set->reset_tbl[reset_index];
+ rst = rst_info->rst;
+ dprintk(CVP_PWR, "reset_clk: name %s rst %pK required_stage=%d\n",
+ rst_set->reset_tbl[reset_index].name, rst, rst_info->required_stage);
+ if (rst)
+ goto skip_reset_init;
+ if (rst_info->required_stage == CVP_ON_USE) {
+ rst = reset_control_get_exclusive_released(&res->pdev->dev,
+ rst_set->reset_tbl[reset_index].name);
+ if (IS_ERR(rst)) {
+ rc = PTR_ERR(rst);
+ dprintk(CVP_ERR, "reset get exclusive fail %d\n", rc);
+ dprintk(CVP_PWR, "reset_clk: name %s get exclusive rst %llx\n",
+ rst_set->reset_tbl[reset_index].name, rst);
+ } else if (rst_info->required_stage == CVP_ON_INIT) {
+ rst = devm_reset_control_get(&res->pdev->dev,
+ dprintk(CVP_ERR, "reset get fail %d\n", rc);
+ dprintk(CVP_PWR, "reset_clk: name %s get rst %llx\n",
+ dprintk(CVP_ERR, "Invalid reset stage\n");
+ rst_set->reset_tbl[reset_index].rst = rst;
+ rst_info->state = RESET_INIT;
+skip_reset_init:
+static int __reset_control_assert_name(struct iris_hfi_device *device,
+ const char *name)
+ struct reset_info *rcinfo = NULL;
+ bool found = false;
+ iris_hfi_for_each_reset_clock(device, rcinfo) {
+ if (strcmp(rcinfo->name, name))
+ found = true;
+ rc = reset_control_assert(rcinfo->rst);
+ "%s: failed to assert reset control (%s), rc = %d\n",
+ __func__, rcinfo->name, rc);
+ dprintk(CVP_PWR, "%s: assert reset control (%s)\n",
+ __func__, rcinfo->name);
+ if (!found) {
+ dprintk(CVP_PWR, "%s: reset control (%s) not found\n",
+ __func__, name);
+static int __reset_control_deassert_name(struct iris_hfi_device *device,
+ rc = reset_control_deassert(rcinfo->rst);
+ "%s: deassert reset control for (%s) failed, rc %d\n",
+ dprintk(CVP_PWR, "%s: deassert reset control (%s)\n",
+static int __reset_control_acquire(struct iris_hfi_device *device,
+ int max_retries = 10000;
+ if (rcinfo->state == RESET_ACQUIRED)
+acquire_again:
+ rc = reset_control_acquire(rcinfo->rst);
+ if (rc == -EBUSY) {
+ usleep_range(100, 150);
+ max_retries--;
+ if (max_retries) {
+ goto acquire_again;
+ "%s acquire %s -EBUSY\n",
+ "%s: acquire failed (%s) rc %d\n",
+ dprintk(CVP_PWR, "%s: reset acquire succeed (%s)\n",
+ rcinfo->state = RESET_ACQUIRED;
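__reset_control_acquire() above retries while the reset line reports -EBUSY, sleeping briefly between attempts and giving up after a fixed budget. A generic userspace sketch of that bounded-retry pattern (the callback, retry count, and delay are illustrative):

#include <errno.h>
#include <time.h>

static int acquire_with_retry(int (*try_acquire)(void *), void *ctx)
{
	int max_retries = 10000;
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 100 * 1000 }; /* ~100us */

	for (;;) {
		int rc = try_acquire(ctx);

		if (rc != -EBUSY)
			return rc;		/* success or a hard failure */
		if (--max_retries <= 0)
			return -EBUSY;		/* still busy after all retries */
		nanosleep(&delay, NULL);
	}
}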
+static int __reset_control_release(struct iris_hfi_device *device,
+ if (rcinfo->state != RESET_ACQUIRED) {
+ dprintk(CVP_WARN, "Double releasing reset clk?\n");
+ reset_control_release(rcinfo->rst);
+ dprintk(CVP_PWR, "%s: reset release succeed (%s)\n",
+ rcinfo->state = RESET_RELEASED;
+static void __deinit_bus(struct iris_hfi_device *device)
+ device->bus_vote = CVP_DEFAULT_BUS_VOTE;
+ iris_hfi_for_each_bus_reverse(device, bus) {
+ dev_set_drvdata(bus->dev, NULL);
+ icc_put(bus->client);
+ bus->client = NULL;
+static int __init_bus(struct iris_hfi_device *device)
+ * This is stupid, but there's no other easy way to get ahold
+ * of struct bus_info in iris_hfi_devfreq_*()
+ WARN(dev_get_drvdata(bus->dev), "%s's drvdata already set\n",
+ dev_name(bus->dev));
+ dev_set_drvdata(bus->dev, device);
+ bus->client = icc_get(&device->res->pdev->dev,
+ bus->master, bus->slave);
+ if (IS_ERR_OR_NULL(bus->client)) {
+ rc = PTR_ERR(bus->client) ?: -EBADHANDLE;
+ dprintk(CVP_ERR, "Failed to register bus %s: %d\n",
+ bus->name, rc);
+ goto err_add_dev;
+err_add_dev:
+ __deinit_bus(device);
+static void __deinit_regulators(struct iris_hfi_device *device)
+ struct regulator_info *rinfo = NULL;
+ iris_hfi_for_each_regulator_reverse(device, rinfo) {
+ if (rinfo->regulator) {
+ regulator_put(rinfo->regulator);
+ rinfo->regulator = NULL;
+static int __init_regulators(struct iris_hfi_device *device)
+ rinfo->regulator = regulator_get(&device->res->pdev->dev,
+ if (IS_ERR_OR_NULL(rinfo->regulator)) {
+ rc = PTR_ERR(rinfo->regulator) ?: -EBADHANDLE;
+ dprintk(CVP_ERR, "Failed to get regulator: %s\n",
+ goto err_reg_get;
+err_reg_get:
+ __deinit_regulators(device);
+static void __deinit_subcaches(struct iris_hfi_device *device)
+ struct subcache_info *sinfo = NULL;
+ dprintk(CVP_ERR, "deinit_subcaches: invalid device %pK\n",
+ if (!is_sys_cache_present(device))
+ iris_hfi_for_each_subcache_reverse(device, sinfo) {
+ if (sinfo->subcache) {
+ dprintk(CVP_CORE, "deinit_subcaches: %s\n",
+ sinfo->name);
+ llcc_slice_putd(sinfo->subcache);
+ sinfo->subcache = NULL;
+static int __init_subcaches(struct iris_hfi_device *device)
+ dprintk(CVP_ERR, "init_subcaches: invalid device %pK\n",
+ iris_hfi_for_each_subcache(device, sinfo) {
+ if (!strcmp("cvp", sinfo->name)) {
+ sinfo->subcache = llcc_slice_getd(LLCC_CVP);
+ } else if (!strcmp("cvpfw", sinfo->name)) {
+ sinfo->subcache = llcc_slice_getd(LLCC_CVPFW);
+ dprintk(CVP_ERR, "Invalid subcache name %s\n",
+ if (IS_ERR_OR_NULL(sinfo->subcache)) {
+ rc = PTR_ERR(sinfo->subcache) ?
+ PTR_ERR(sinfo->subcache) : -EBADHANDLE;
+ "init_subcaches: invalid subcache: %s rc %d\n",
+ sinfo->name, rc);
+ goto err_subcache_get;
+ dprintk(CVP_CORE, "init_subcaches: %s\n",
+err_subcache_get:
+ __deinit_subcaches(device);
+static int __init_resources(struct iris_hfi_device *device,
+ struct msm_cvp_platform_resources *res)
+ int i, rc = 0;
+ rc = __init_regulators(device);
+ dprintk(CVP_ERR, "Failed to get all regulators\n");
+ rc = msm_cvp_init_clocks(device);
+ dprintk(CVP_ERR, "Failed to init clocks\n");
+ goto err_init_clocks;
+ for (i = 0; i < device->res->reset_set.count; i++) {
+ rc = __init_reset_clk(res, i);
+ dprintk(CVP_ERR, "Failed to init reset clocks\n");
+ goto err_init_reset_clk;
+ rc = __init_bus(device);
+ dprintk(CVP_ERR, "Failed to init bus: %d\n", rc);
+ goto err_init_bus;
+ rc = __init_subcaches(device);
+ dprintk(CVP_WARN, "Failed to init subcaches: %d\n", rc);
+ device->sys_init_capabilities =
+ kzalloc(sizeof(struct msm_cvp_capability)
+ * CVP_MAX_SESSIONS, GFP_KERNEL);
+err_init_reset_clk:
+err_init_bus:
+ msm_cvp_deinit_clocks(device);
+err_init_clocks:
+static void __deinit_resources(struct iris_hfi_device *device)
+ kfree(device->sys_init_capabilities);
+ device->sys_init_capabilities = NULL;
+static int __disable_regulator_impl(struct regulator_info *rinfo,
+ dprintk(CVP_PWR, "Disabling regulator %s\n", rinfo->name);
+ * This call is needed. The driver needs to acquire control back
+ * from HW in order to disable the regulator. Otherwise the behavior
+ * is unknown.
+ "Failed to acquire control on %s\n",
+ goto disable_regulator_failed;
+ "XO_RESET could not be acquired: skip disabling the regulator %s\n",
+ rc = regulator_disable(rinfo->regulator);
+ "Failed to disable %s: %d\n",
+ rinfo->name, rc);
+disable_regulator_failed:
+ /* Bring attention to this issue */
+static int __disable_hw_power_collapse(struct iris_hfi_device *device)
+ if (!msm_cvp_fw_low_power_mode) {
+ dprintk(CVP_PWR, "Not enabling hardware power collapse\n");
+ rc = __take_back_regulators(device);
+ "%s : Failed to disable HW power collapse %d\n",
+static int __enable_hw_power_collapse(struct iris_hfi_device *device)
+ rc = __hand_off_regulators(device);
+ "%s : Failed to enable HW power collapse %d\n",
+ if (strcmp(rinfo->name, name))
+ "XO_RESET could not be acquired: skip enabling the regulator %s\n",
+ rc = regulator_enable(rinfo->regulator);
+ dprintk(CVP_ERR, "Failed to enable %s: %d\n",
+ dprintk(CVP_ERR,"%s: regulator %s not enabled\n",
+ __func__, rinfo->name);
+ regulator_disable(rinfo->regulator);
+ dprintk(CVP_PWR, "Enabled regulator %s\n", rinfo->name);
+ dprintk(CVP_ERR, "regulator %s not found\n", name);
+ __disable_regulator_impl(rinfo, device);
+ dprintk(CVP_PWR, "%s Disabled regulator %s\n", __func__, name);
+ dprintk(CVP_ERR, "%s regulator %s not found\n", __func__, name);
+static int __enable_subcaches(struct iris_hfi_device *device)
+ u32 c = 0;
+ struct subcache_info *sinfo;
+ if (msm_cvp_syscache_disable || !is_sys_cache_present(device))
+ /* Activate subcaches */
+ rc = llcc_slice_activate(sinfo->subcache);
+ dprintk(CVP_WARN, "Failed to activate %s: %d\n",
+ goto err_activate_fail;
+ sinfo->isactive = true;
+ dprintk(CVP_CORE, "Activated subcache %s\n", sinfo->name);
+ dprintk(CVP_CORE, "Activated %d Subcaches to CVP\n", c);
+err_activate_fail:
+ __release_subcaches(device);
+static int __set_subcaches(struct iris_hfi_device *device)
+ u32 resource[CVP_MAX_SUBCACHE_SIZE];
+ struct cvp_hfi_resource_syscache_info_type *sc_res_info;
+ struct cvp_hfi_resource_subcache_type *sc_res;
+ struct cvp_resource_hdr rhdr;
+ if (device->res->sys_cache_res_set || msm_cvp_syscache_disable) {
+ dprintk(CVP_CORE, "Subcaches already set or disabled\n");
+ memset((void *)resource, 0x0, (sizeof(u32) * CVP_MAX_SUBCACHE_SIZE));
+ sc_res_info = (struct cvp_hfi_resource_syscache_info_type *)resource;
+ sc_res = &(sc_res_info->rg_subcache_entries[0]);
+ if (sinfo->isactive) {
+ sc_res[c].size = sinfo->subcache->slice_size;
+ sc_res[c].sc_id = sinfo->subcache->slice_id;
+ /* Set resource to CVP for activated subcaches */
+ if (c) {
+ dprintk(CVP_CORE, "Setting %d Subcaches\n", c);
+ rhdr.resource_handle = sc_res_info; /* cookie */
+ rhdr.resource_id = CVP_RESOURCE_SYSCACHE;
+ sc_res_info->num_entries = c;
+ rc = __core_set_resource(device, &rhdr, (void *)sc_res_info);
+ dprintk(CVP_WARN, "Failed to set subcaches %d\n", rc);
+ goto err_fail_set_subcaches;
+ if (sinfo->isactive)
+ sinfo->isset = true;
+ dprintk(CVP_CORE, "Set Subcaches done to CVP\n");
+ device->res->sys_cache_res_set = true;
+err_fail_set_subcaches:
+static int __release_subcaches(struct iris_hfi_device *device)
+ /* Release resource command to Iris */
+ if (sinfo->isset) {
+ /* Update the entry */
+ sinfo->isset = false;
+ if (c > 0) {
+ dprintk(CVP_CORE, "Releasing %d subcaches\n", c);
+ rc = __core_release_resource(device, &rhdr);
+ "Failed to release %d subcaches\n", c);
+ device->res->sys_cache_res_set = false;
+static int __disable_subcaches(struct iris_hfi_device *device)
+ /* De-activate subcaches */
+ dprintk(CVP_CORE, "De-activate subcache %s\n",
+ rc = llcc_slice_deactivate(sinfo->subcache);
+ "Failed to de-activate %s: %d\n",
+ sinfo->isactive = false;
+static void interrupt_init_iris2(struct iris_hfi_device *device)
+ u32 mask_val = 0;
+ /* All interrupts should be disabled initially (0x1F6 is the reset value) */
+ mask_val = __read_register(device, CVP_WRAPPER_INTR_MASK);
+ /* Write 0 to unmask CPU and WD interrupts */
+ mask_val &= ~(CVP_FATAL_INTR_BMSK | CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK);
+ __write_register(device, CVP_WRAPPER_INTR_MASK, mask_val);
+ dprintk(CVP_REG, "Init irq: reg: %x, mask value %x\n",
+ CVP_WRAPPER_INTR_MASK, mask_val);
+ mask_val = 0;
+ mask_val = __read_register(device, CVP_SS_IRQ_MASK);
+ mask_val &= ~(CVP_SS_INTR_BMASK);
+ __write_register(device, CVP_SS_IRQ_MASK, mask_val);
+ dprintk(CVP_REG, "Init irq_wd: reg: %x, mask value %x\n",
+ CVP_SS_IRQ_MASK, mask_val);
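Both unmask writes above are plain read-modify-write operations: read the current mask, clear the bits for the interrupts that should be delivered, and write the result back. A trivial sketch with assumed placeholder mask values:

#include <stdint.h>

#define EXAMPLE_FATAL_INTR_BMSK  0x1F0u	/* placeholder masks, not driver values */
#define EXAMPLE_A2HCPU_INTR_BMSK 0x004u

static uint32_t unmask_cpu_and_wd(uint32_t mask_val)
{
	/* writing 0 to a mask bit unmasks (enables) that interrupt */
	return mask_val & ~(EXAMPLE_FATAL_INTR_BMSK | EXAMPLE_A2HCPU_INTR_BMSK);
}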
+static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device)
+ /* initialize DSP QTBL & UCREGION with CPU queues */
+ __write_register(device, HFI_DSP_QTBL_ADDR,
+ (u32)device->dsp_iface_q_table.align_device_addr);
+ __write_register(device, HFI_DSP_UC_REGION_ADDR,
+ __write_register(device, HFI_DSP_UC_REGION_SIZE,
+ device->dsp_iface_q_table.mem_data.size);
+static void clock_config_on_enable_vpu5(struct iris_hfi_device *device)
+ __write_register(device, CVP_WRAPPER_CPU_CLOCK_CONFIG, 0);
+static int __set_ubwc_config(struct iris_hfi_device *device)
+ if (!device->res->ubwc_config)
+ rc = call_hfi_pkt_op(device, sys_ubwc_config, pkt,
+ device->res->ubwc_config);
+ "ubwc config setting to FW failed\n");
+ goto fail_to_set_ubwc_config;
+fail_to_set_ubwc_config:
+static int __power_on_controller(struct iris_hfi_device *device)
+ rc = __enable_regulator(device, "cvp");
+ dprintk(CVP_ERR, "Failed to enable ctrler: %d\n", rc);
+ rc = msm_cvp_prepare_enable_clk(device, "sleep_clk");
+ dprintk(CVP_ERR, "Failed to enable sleep clk: %d\n", rc);
+ goto fail_reset_clks;
+ rc = call_iris_op(device, reset_control_assert_name, device, "cvp_axi_reset");
+ dprintk(CVP_ERR, "%s: assert cvp_axi_reset failed\n", __func__);
+ rc = call_iris_op(device, reset_control_assert_name, device, "cvp_core_reset");
+ dprintk(CVP_ERR, "%s: assert cvp_core_reset failed\n", __func__);
+ /* wait for deassert */
+ usleep_range(300, 400);
+ rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_axi_reset");
+ dprintk(CVP_ERR, "%s: de-assert cvp_axi_reset failed\n", __func__);
+ rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_core_reset");
+ dprintk(CVP_ERR, "%s: de-assert cvp_core_reset failed\n", __func__);
+ rc = msm_cvp_prepare_enable_clk(device, "gcc_video_axi1");
+ dprintk(CVP_ERR, "Failed to enable axi1 clk: %d\n", rc);
+ rc = msm_cvp_prepare_enable_clk(device, "cvp_clk");
+ dprintk(CVP_ERR, "Failed to enable cvp_clk: %d\n", rc);
+ goto fail_enable_clk;
+ dprintk(CVP_PWR, "EVA controller powered on\n");
+fail_enable_clk:
+ msm_cvp_disable_unprepare_clk(device, "gcc_video_axi1");
+fail_reset_clks:
+ __disable_regulator(device, "cvp");
+static int __power_on_core(struct iris_hfi_device *device)
+ rc = __enable_regulator(device, "cvp-core");
+ dprintk(CVP_ERR, "Failed to enable core: %d\n", rc);
+ rc = msm_cvp_prepare_enable_clk(device, "video_cc_mvs1_clk_src");
+ dprintk(CVP_ERR, "Failed to enable video_cc_mvs1_clk_src:%d\n",
+ __disable_regulator(device, "cvp-core");
+ rc = msm_cvp_prepare_enable_clk(device, "core_clk");
+ dprintk(CVP_ERR, "Failed to enable core_clk: %d\n", rc);
+/*#ifdef CONFIG_EVA_PINEAPPLE
+ __write_register(device, CVP_AON_WRAPPER_CVP_NOC_ARCG_CONTROL, 0);
+ __write_register(device, CVP_NOC_RCGCONTROLLER_HYSTERESIS_LOW, 0x2f);
+ __write_register(device, CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW, 1);
+ __write_register(device, CVP_NOC_RCGCONTROLLER_MAINCTL_LOW, 1);
+ __write_register(device, CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW, 0);
+#endif*/
+ dprintk(CVP_PWR, "EVA core powered on\n");
+static int __iris_power_on(struct iris_hfi_device *device)
+ u32 reg_gdsc, reg_cbcr, spare_val;
+ if (device->power_enabled)
+ /* Vote for all hardware resources */
+ rc = __vote_buses(device, device->bus_vote.data,
+ device->bus_vote.data_count);
+ dprintk(CVP_ERR, "Failed to vote buses, err: %d\n", rc);
+ goto fail_vote_buses;
+ rc = __power_on_controller(device);
+ goto fail_enable_controller;
+ rc = __power_on_core(device);
+ goto fail_enable_core;
+ rc = msm_cvp_scale_clocks(device);
+ "Failed to scale clocks, perf may regress\n");
+ dprintk(CVP_PWR, "Done with scaling\n");
+ /*Do not access registers before this point!*/
+ device->power_enabled = true;
+ /* Thomas input to debug CPU NoC hang */
+ __write_register(device, CVP_NOC_SBM_FAULTINEN0_LOW, 0x1);
+ __write_register(device, CVP_NOC_ERR_MAINCTL_LOW_OFFS, 0x3);
+ * Re-program all of the registers that get reset as a result of
+ * regulator_disable() and _enable()
+ * calling below function requires CORE powered on
+ rc = __set_registers(device);
+ dprintk(CVP_CORE, "Done with register set\n");
+ reg_cbcr = __read_register(device, CVP_CC_MVS1_CBCR);
+ if (!(reg_gdsc & 0x80000000) || (reg_cbcr & 0x80000000)) {
+ dprintk(CVP_ERR, "CORE power on failed gdsc %x cbcr %x\n",
+ reg_gdsc, reg_cbcr);
+ reg_gdsc = __read_register(device, CVP_CC_MVS1C_GDSCR);
+ reg_cbcr = __read_register(device, CVP_CC_MVS1C_CBCR);
+ dprintk(CVP_ERR, "CTRL power on failed gdsc %x cbcr %x\n",
+ spare_val = __read_register(device, CVP_AON_WRAPPER_SPARE);
+ if ((spare_val & 0x2) != 0) {
+ usleep_range(2000, 3000);
+ dprintk(CVP_ERR, "WRAPPER_SPARE non-zero %#x\n", spare_val);
+ call_iris_op(device, interrupt_init, device);
+ dprintk(CVP_CORE, "Done with interrupt enabling\n");
+ device->intr_status = 0;
+ CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x7);
+ pr_info_ratelimited(CVP_DBG_TAG "cvp (eva) powered on\n", "pwr");
+fail_enable_core:
+ __power_off_controller(device);
+fail_enable_controller:
+ __unvote_buses(device);
+fail_vote_buses:
+ device->power_enabled = false;
+static inline int __suspend(struct iris_hfi_device *device)
+ } else if (!device->power_enabled) {
+ dprintk(CVP_PWR, "Power already disabled\n");
+ dprintk(CVP_PWR, "Entering suspend\n");
+ dprintk(CVP_WARN, "Failed to suspend cvp core %d\n", rc);
+ goto err_tzbsp_suspend;
+ call_iris_op(device, power_off, device);
+ cvp_pm_qos_update(device, false);
+err_tzbsp_suspend:
+static void __print_sidebandmanager_regs(struct iris_hfi_device *device)
+ u32 sbm_ln0_low, axi_cbcr, val;
+ u32 main_sbm_ln0_low = 0xdeadbeef, main_sbm_ln0_high = 0xdeadbeef;
+ u32 main_sbm_ln1_high = 0xdeadbeef, cpu_cs_x2rpmh;
+ sbm_ln0_low =
+ __read_register(device, CVP_NOC_SBM_SENSELN0_LOW);
+ cpu_cs_x2rpmh = __read_register(device, CVP_CPU_CS_X2RPMh);
+ __write_register(device, CVP_CPU_CS_X2RPMh,
+ (cpu_cs_x2rpmh | CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK));
+ usleep_range(500, 1000);
+ if (!(cpu_cs_x2rpmh & CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK)) {
+ "failed set CVP_CPU_CS_X2RPMH mask %x\n",
+ cpu_cs_x2rpmh);
+ axi_cbcr = __read_gcc_register(device, CVP_GCC_VIDEO_AXI1_CBCR);
+ if (axi_cbcr & 0x80000000) {
+ dprintk(CVP_WARN, "failed to turn on AXI clock %x\n",
+ axi_cbcr);
+ /* Added by Thomas to debug CPU NoC hang */
+ val = __read_register(device, CVP_NOC_ERR_ERRVLD_LOW_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRVLD_LOW %#x\n", val);
+ val = __read_register(device, CVP_NOC_SBM_FAULTINSTATUS0_LOW);
+ dprintk(CVP_ERR, "CVP_NOC_SBM_FAULTINSTATUS0_LOW %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG0_LOW_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_LOW %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG0_HIGH_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG0_HIGH %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG1_LOW_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_LOW %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG1_HIGH_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG1_HIGH %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG2_LOW_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_LOW %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG2_HIGH_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG2_HIGH %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG3_LOW_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_LOW %#x\n", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRLOG3_HIGH_OFFS);
+ dprintk(CVP_ERR, "CVP_NOC_ERL_MAIN_ERRLOG3_HIGH %#x\n", val);
+ /* end of addition */
+ main_sbm_ln0_low = __read_register(device,
+ CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW +
+ device->res->noc_main_sidebandmanager_offset);
+ main_sbm_ln0_high = __read_register(device,
+ CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_HIGH +
+ main_sbm_ln1_high = __read_register(device,
+ CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH +
+ cpu_cs_x2rpmh = cpu_cs_x2rpmh & (~CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK);
+ __write_register(device, CVP_CPU_CS_X2RPMh, cpu_cs_x2rpmh);
+ dprintk(CVP_WARN, "Sidebandmanager regs %x %x %x %x %x\n",
+ sbm_ln0_low, main_sbm_ln0_low,
+ main_sbm_ln0_high, main_sbm_ln1_high,
+static void __enter_cpu_noc_lpi(struct iris_hfi_device *device)
+ u32 lpi_status, count = 0, max_count = 2000;
+ /* New addition to put CPU/Tensilica to low power */
+ count = 0;
+ __write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x1);
+ while (count < max_count) {
+ lpi_status = __read_register(device, CVP_WRAPPER_CPU_NOC_LPI_STATUS);
+ if ((lpi_status & BIT(1)) || ((lpi_status & BIT(2)) && (!(lpi_status & BIT(0))))) {
+ * If QDENY == true, or
+ * If QACTIVE == true && QACCEPT == false
+ * Try again
+ __write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x0);
+ usleep_range(10, 20);
+ usleep_range(1000, 1200);
+ "%s, CPU Noc: lpi_status %x (count %d)\n", __func__, lpi_status, count);
+ if (count == max_count) {
+ u32 pc_ready, wfi_status;
+ pc_ready = __read_register(device, CVP_CTRL_STATUS);
+ "%s, CPU NOC not in qaccept status %x %x %x\n",
+ __func__, lpi_status, wfi_status, pc_ready);
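The retry condition in __enter_cpu_noc_lpi() follows the Q-channel handshake: per the code above, bit 0 is QACCEPT, bit 1 is QDENY, and bit 2 is QACTIVE. Expressed as a predicate:

#include <stdbool.h>
#include <stdint.h>

static bool lpi_needs_retry(uint32_t lpi_status)
{
	bool qaccept = lpi_status & (1u << 0);
	bool qdeny   = lpi_status & (1u << 1);
	bool qactive = lpi_status & (1u << 2);

	/* retry if the request was denied, or still active without accept */
	return qdeny || (qactive && !qaccept);
}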
+static int __power_off_controller(struct iris_hfi_device *device)
+ u32 lpi_status, count = 0, max_count = 1000;
+ u32 spare_val, spare_status;
+ /* HPG 6.2.2 Step 1 */
+ __write_register(device, CVP_CPU_CS_X2RPMh, 0x3);
+ /* HPG 6.2.2 Step 2, noc to low power */
+ __enter_cpu_noc_lpi(device);
+ /* HPG 6.2.2 Step 3, debug bridge to low power BYPASSED */
+ /* HPG 6.2.2 Step 4, debug bridge to lpi release */
+ CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x0);
+ lpi_status = 0x1;
+ while (lpi_status && count < max_count) {
+ lpi_status = __read_register(device,
+ CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+ "DBLP Release: lpi_status %d(count %d)\n",
+ lpi_status, count);
+ "DBLP Release: lpi_status %x\n", lpi_status);
+ /* PDXFIFO reset: addition for Kailua / Lanai */
+ __write_register(device, CVP_WRAPPER_AXI_CLOCK_CONFIG, 0x3);
+ __write_register(device, CVP_WRAPPER_QNS4PDXFIFO_RESET, 0x1);
+ __write_register(device, CVP_WRAPPER_QNS4PDXFIFO_RESET, 0x0);
+ __write_register(device, CVP_WRAPPER_AXI_CLOCK_CONFIG, 0x0);
+ /* HPG 6.2.2 Step 5 */
+ msm_cvp_disable_unprepare_clk(device, "cvp_clk");
+ usleep_range(1000, 1050);
+ /* disable EVA NoC clock */
+ __write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL, 0x1);
+ /* enable EVA NoC reset */
+ __write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_SW_RESET, 0x1);
+ dprintk(CVP_ERR, "FATAL ERROR, HPG step 17 to 20 will be bypassed\n");
+ goto skip_xo_reset;
+ spare_status = 0x1;
+ while (spare_status != 0x0) {
+ spare_status = spare_val & 0x2;
+ __write_register(device, CVP_AON_WRAPPER_SPARE, 0x1);
+ rc = call_iris_op(device, reset_control_assert_name, device, "cvp_xo_reset");
+ dprintk(CVP_ERR, "%s: assert cvp_xo_reset failed\n", __func__);
+ /* de-assert EVA_NoC reset */
+ __write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_SW_RESET, 0x0);
+ /* de-assert EVA video_cc XO reset and enable video_cc XO clock after 80us */
+ usleep_range(80, 100);
+ rc = call_iris_op(device, reset_control_deassert_name, device, "cvp_xo_reset");
+ dprintk(CVP_ERR, "%s: de-assert cvp_xo_reset failed\n", __func__);
+ /* clear XO mask bit - this step was missing in previous sequence */
+ __write_register(device, CVP_AON_WRAPPER_SPARE, 0x0);
+skip_xo_reset:
+ /* enable EVA NoC clock */
+ __write_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL, 0x0);
+ /* De-assert EVA_CTL Force Sleep Retention */
+ usleep_range(400, 500);
+ /* HPG 6.2.2 Step 6 */
+ /* HPG 6.2.2 Step 7 */
+ rc = msm_cvp_disable_unprepare_clk(device, "gcc_video_axi1");
+ rc = msm_cvp_disable_unprepare_clk(device, "sleep_clk");
+ dprintk(CVP_ERR, "Failed to disable sleep clk: %d\n", rc);
+static int __power_off_core(struct iris_hfi_device *device)
+ u32 reg_status = 0, lpi_status, config, value = 0, count = 0;
+ u32 warn_flag = 0, max_count = 10;
+ value = __read_register(device, CVP_CC_MVS1_GDSCR);
+ if (!(value & 0x80000000)) {
+ * Core has been powered off by f/w.
+ * Check NOC reset registers to ensure
+ * NO outstanding NoC transactions
+ value = __read_register(device, CVP_NOC_RESET_ACK);
+ if (value) {
+ "Core off with NOC RESET ACK non-zero %x\n",
+ value);
+ msm_cvp_disable_unprepare_clk(device, "core_clk");
+ msm_cvp_disable_unprepare_clk(device, "video_cc_mvs1_clk_src");
+ } else if (!(value & 0x2)) {
+ * HW_CONTROL PC is disabled, so the core is still powered on for
+ * CVP NoC access
+ dprintk(CVP_PWR, "Driver controls Core power off now\n");
+ * check that the core clock branch is enabled, otherwise
+ * we cannot read the core idle register
+ config = __read_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+ if (config) {
+ "core clock config not enabled, enable it to access core\n");
+ __write_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG, 0);
+ * add MNoC idle check before collapsing MVS1 per HPG update
+ * poll for NoC DMA idle -> HPG 6.2.1
+ do {
+ value = __read_register(device, CVP_SS_IDLE_STATUS);
+ if (value & 0x400000)
+ usleep_range(1000, 2000);
+ } while (count < max_count);
+ dprintk(CVP_WARN, "Core fail to go idle %x\n", value);
+ warn_flag = 1;
+ max_count = 1000;
+ __write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x1);
+ while (!reg_status && count < max_count) {
+ lpi_status =
+ __read_register(device,
+ CVP_AON_WRAPPER_CVP_NOC_LPI_STATUS);
+ reg_status = lpi_status & BIT(0);
+ /* Wait for Core noc lpi status to be set */
+ "Core Noc: lpi_status %x noc_status %x (count %d)\n",
+ lpi_status, reg_status, count);
+ "Core NOC not in qaccept status %x %x %x %x\n",
+ reg_status, lpi_status, wfi_status, pc_ready);
+ __write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x0);
+ if (warn_flag)
+ /* Reset both sides of 2 ahb2ahb_bridges (TZ and non-TZ) */
+ __write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x3);
+ __write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x2);
+ __write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x0);
+ __write_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG, config);
+ __disable_hw_power_collapse(device);
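__power_off_core() decides who owns the power-off from the MVS1 GDSC register: bit 31 reports whether the rail is still on, and bit 1 whether hardware (firmware) controls the collapse. A sketch of that classification, mirroring the branches above:

#include <stdint.h>

enum core_poweroff_owner {
	CORE_ALREADY_OFF,	/* firmware collapsed the core already */
	CORE_DRIVER_POWERS_OFF,	/* driver must run the manual sequence */
	CORE_HW_CONTROLLED,	/* HW control set: leave it to the hardware */
};

static enum core_poweroff_owner classify_gdscr(uint32_t gdscr)
{
	if (!(gdscr & 0x80000000u))
		return CORE_ALREADY_OFF;
	if (!(gdscr & 0x2u))
		return CORE_DRIVER_POWERS_OFF;
	return CORE_HW_CONTROLLED;
}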
+static void power_off_iris2(struct iris_hfi_device *device)
+ if (!device->power_enabled || !device->res->sw_power_collapsible)
+ if (!(device->intr_status & CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+ disable_irq_nosync(device->cvp_hal_data->irq);
+ __power_off_core(device);
+ if (__unvote_buses(device))
+ dprintk(CVP_WARN, "Failed to unvote for buses\n");
+ /*Do not access registers after this point!*/
+ pr_info(CVP_DBG_TAG "cvp (eva) power collapsed\n", "pwr");
+static inline int __resume(struct iris_hfi_device *device)
+ } else if (device->power_enabled) {
+ } else if (!__core_in_valid_state(device)) {
+ dprintk(CVP_PWR, "iris_hfi_device in deinit state.");
+ dprintk(CVP_PWR, "Resuming from power collapse\n");
+ rc = __iris_power_on(device);
+ dprintk(CVP_ERR, "Failed to power on cvp\n");
+ goto err_iris_power_on;
+ __setup_ucregion_memory_map(device);
+ /* RUMI: set CVP_CTRL_INIT register to disable synx in FW */
+ /* Reboot the firmware */
+ rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_RESUME);
+ dprintk(CVP_ERR, "Failed to resume cvp core %d\n", rc);
+ goto err_set_cvp_state;
+ /* Wait for boot completion */
+ rc = __boot_firmware(device);
+ dprintk(CVP_ERR, "Failed to reset cvp core\n");
+ goto err_reset_core;
+ * Workaround for a H/W bug: these registers must be reprogrammed once
+ * firmware is out of reset
+ __set_threshold_registers(device);
+ cvp_pm_qos_update(device, true);
+ __dsp_resume(device);
+ dprintk(CVP_PWR, "Resumed from power collapse\n");
+ /* Don't reset skip_pc_count for SYS_PC_PREP cmd */
+ if (device->last_packet_type != HFI_CMD_SYS_PC_PREP)
+err_reset_core:
+ __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_SUSPEND);
+err_set_cvp_state:
+err_iris_power_on:
+ dprintk(CVP_ERR, "Failed to resume from power collapse\n");
+static int __power_on_init(struct iris_hfi_device *device)
+ /* Initialize resources */
+ rc = __init_resources(device, device->res);
+ dprintk(CVP_ERR, "Failed to init resources: %d\n", rc);
+ rc = __initialize_packetization(device);
+ dprintk(CVP_ERR, "Failed to initialize packetization\n");
+ goto fail_iris_init;
+ dprintk(CVP_ERR, "Failed to power on iris in in load_fw\n");
+fail_iris_init:
+ __deinit_resources(device);
+static int __load_fw(struct iris_hfi_device *device)
+ if ((!device->res->use_non_secure_pil && !device->res->firmware_base)
+ || device->res->use_non_secure_pil) {
+ rc = load_cvp_fw_impl(device);
+ goto fail_load_fw;
+fail_load_fw:
+static void __unload_fw(struct iris_hfi_device *device)
+ if (!device->resources.fw.cookie)
+ if (device->state != IRIS_STATE_DEINIT)
+ flush_workqueue(device->iris_pm_workq);
+ unload_cvp_fw_impl(device);
+ __interface_queues_release(device);
+ dprintk(CVP_WARN, "Firmware unloaded\n");
+static int iris_hfi_get_fw_info(void *dev, struct cvp_hal_fw_info *fw_info)
+ if (!device || !fw_info) {
+ "%s Invalid parameter: device = %pK fw_info = %pK\n",
+ __func__, device, fw_info);
+ while (cvp_driver->fw_version[i++] != 'V' && i < CVP_VERSION_LENGTH)
+ ;
+ if (i == CVP_VERSION_LENGTH - 1) {
+ dprintk(CVP_WARN, "Iris version string is not proper\n");
+ fw_info->version[0] = '\0';
+ goto fail_version_string;
+ memcpy(&fw_info->version[0], &cvp_driver->fw_version[0],
+ CVP_VERSION_LENGTH);
+ fw_info->version[CVP_VERSION_LENGTH - 1] = '\0';
+fail_version_string:
+ dprintk(CVP_CORE, "F/W version retrieved : %s\n", fw_info->version);
+ fw_info->base_addr = device->cvp_hal_data->firmware_base;
+ fw_info->register_base = device->res->register_base;
+ fw_info->register_size = device->cvp_hal_data->register_size;
+ fw_info->irq = device->cvp_hal_data->irq;
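The version handling above only scans for a 'V' marker as a sanity check, then copies the fixed-length string and forces NUL termination. A stand-alone sketch of the same logic with an assumed length constant:

#include <stdbool.h>
#include <string.h>

#define EXAMPLE_VERSION_LEN 128	/* placeholder for CVP_VERSION_LENGTH */

static bool copy_fw_version(char *dst, const char *fw_version)
{
	int i = 0;

	/* scan for the 'V' marker within the fixed-length buffer */
	while (fw_version[i++] != 'V' && i < EXAMPLE_VERSION_LEN)
		;
	if (i == EXAMPLE_VERSION_LEN - 1) {
		dst[0] = '\0';		/* no marker found: treat as malformed */
		return false;
	}
	memcpy(dst, fw_version, EXAMPLE_VERSION_LEN);
	dst[EXAMPLE_VERSION_LEN - 1] = '\0';
	return true;
}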
+static int iris_hfi_get_core_capabilities(void *dev)
+ dprintk(CVP_CORE, "%s not supported yet!\n", __func__);
+static const char * const mid_names[16] = {
+ "CVP_FW",
+ "ARP_DATA",
+ "CVP_MPU_PIXEL",
+ "CVP_MPU_NON_PIXEL",
+ "CVP_FDU_PIXEL",
+ "CVP_FDU_NON_PIXEL",
+ "CVP_GCE_PIXEL",
+ "CVP_GCE_NON_PIXEL",
+ "CVP_TOF_PIXEL",
+ "CVP_TOF_NON_PIXEL",
+ "CVP_VADL_PIXEL",
+ "CVP_VADL_NON_PIXEL",
+ "CVP_RGE_NON_PIXEL",
+ "CVP_CDM",
+ "Invalid",
+ "Invalid"
+static void __print_reg_details(u32 val)
+ u32 mid, sid;
+ mid = (val >> 5) & 0xF;
+ sid = (val >> 2) & 0x7;
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_LOW: %#x\n", val);
+ dprintk(CVP_ERR, "Sub-client:%s, SID: %d\n", mid_names[mid], sid);
+static void __err_log(bool logging, u32 *data, const char *name, u32 val)
+ if (logging)
+ *data = val;
+ dprintk(CVP_ERR, "%s: %#x\n", name, val);
+static void __noc_error_info_iris2(struct iris_hfi_device *device)
+ struct cvp_noc_log *noc_log;
+ u32 val = 0, regi, regii, regiii;
+ bool log_required = false;
+ if (!core->ssr_count && core->resources.max_ssr_allowed > 1)
+ log_required = true;
+ noc_log = &core->log.noc_log;
+ if (noc_log->used) {
+ dprintk(CVP_WARN, "Data already in NoC log, skip logging\n");
+ noc_log->used = 1;
+ val = __read_register(device, CVP_CC_MVS1_GDSCR);
+ regi = __read_register(device, CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL);
+ regii = __read_register(device, CVP_CC_MVS1_CBCR);
+ regiii = __read_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG);
+ dprintk(CVP_ERR, "noc reg check: %#x %#x %#x %#x\n",
+ val, regi, regii, regiii);
+ val = __read_register(device, CVP_NOC_ERR_SWID_LOW_OFFS);
+ __err_log(log_required, &noc_log->err_ctrl_swid_low,
+ "CVP_NOC_ERL_MAIN_SWID_LOW", val);
+ val = __read_register(device, CVP_NOC_ERR_SWID_HIGH_OFFS);
+ __err_log(log_required, &noc_log->err_ctrl_swid_high,
+ "CVP_NOC_ERL_MAIN_SWID_HIGH", val);
+ val = __read_register(device, CVP_NOC_ERR_MAINCTL_LOW_OFFS);
+ __err_log(log_required, &noc_log->err_ctrl_mainctl_low,
+ "CVP_NOC_ERL_MAIN_MAINCTL_LOW", val);
+ __err_log(log_required, &noc_log->err_ctrl_errvld_low,
+ "CVP_NOC_ERL_MAIN_ERRVLD_LOW", val);
+ val = __read_register(device, CVP_NOC_ERR_ERRCLR_LOW_OFFS);
+ __err_log(log_required, &noc_log->err_ctrl_errclr_low,
+ "CVP_NOC_ERL_MAIN_ERRCLR_LOW", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog0_low,
+ "CVP_NOC_ERL_MAIN_ERRLOG0_LOW", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog0_high,
+ "CVP_NOC_ERL_MAIN_ERRLOG0_HIGH", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog1_low,
+ "CVP_NOC_ERL_MAIN_ERRLOG1_LOW", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog1_high,
+ "CVP_NOC_ERL_MAIN_ERRLOG1_HIGH", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog2_low,
+ "CVP_NOC_ERL_MAIN_ERRLOG2_LOW", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog2_high,
+ "CVP_NOC_ERL_MAIN_ERRLOG2_HIGH", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog3_low,
+ "CVP_NOC_ERL_MAIN_ERRLOG3_LOW", val);
+ __err_log(log_required, &noc_log->err_ctrl_errlog3_high,
+ "CVP_NOC_ERL_MAIN_ERRLOG3_HIGH", val);
+ val = __read_register(device,
+ CVP_NOC_CORE_ERR_SWID_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_swid_low,
+ "CVP_NOC__CORE_ERL_MAIN_SWID_LOW", val);
+ CVP_NOC_CORE_ERR_SWID_HIGH_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_swid_high,
+ "CVP_NOC_CORE_ERL_MAIN_SWID_HIGH", val);
+ CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_mainctl_low,
+ "CVP_NOC_CORE_ERL_MAIN_MAINCTL_LOW", val);
+ CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errvld_low,
+ "CVP_NOC_CORE_ERL_MAIN_ERRVLD_LOW", val);
+ CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errclr_low,
+ "CVP_NOC_CORE_ERL_MAIN_ERRCLR_LOW", val);
+ CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog0_low,
+ "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_LOW", val);
+ CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog0_high,
+ "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_HIGH", val);
+ CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog1_low,
+ "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_LOW", val);
+ CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog1_high,
+ "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_HIGH", val);
+ CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog2_low,
+ "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_LOW", val);
+ CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog2_high,
+ "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_HIGH", val);
+ CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog3_low,
+ "CORE ERRLOG3_LOW, below details", val);
+ __print_reg_details(val);
+ CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS + device->res->noc_core_err_offset);
+ __err_log(log_required, &noc_log->err_core_errlog3_high,
+ "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_HIGH", val);
+ CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS + device->res->noc_core_err_offset, 0x1);
+#define CVP_SS_CLK_HALT 0x8
+#define CVP_SS_CLK_EN 0xC
+#define CVP_SS_ARP_TEST_BUS_CONTROL 0x700
+#define CVP_SS_ARP_TEST_BUS_REGISTER 0x704
+#define CVP_DMA_TEST_BUS_CONTROL 0x66A0
+#define CVP_DMA_TEST_BUS_REGISTER 0x66A4
+#define CVP_VPU_WRAPPER_CORE_CONFIG 0xB0088
+ __write_register(device, CVP_SS_CLK_HALT, 0);
+ __write_register(device, CVP_SS_CLK_EN, 0x3f);
+ __write_register(device, CVP_VPU_WRAPPER_CORE_CONFIG, 0);
+static int iris_hfi_noc_error_info(void *dev)
+ if (!dev) {
+ dprintk(CVP_ERR, "%s: null device\n", __func__);
+ device = dev;
+ dprintk(CVP_ERR, "%s: non error information\n", __func__);
+ call_iris_op(device, noc_error_info, device);
+static int __initialize_packetization(struct iris_hfi_device *device)
+ if (!device || !device->res) {
+ dprintk(CVP_ERR, "%s - invalid param\n", __func__);
+ device->packetization_type = HFI_PACKETIZATION_4XX;
+ device->pkt_ops = cvp_hfi_get_pkt_ops_handle(
+ device->packetization_type);
+ if (!device->pkt_ops) {
+ dprintk(CVP_ERR, "Failed to get pkt_ops handle\n");
+void __init_cvp_ops(struct iris_hfi_device *device)
+ device->hal_ops = &hal_ops;
+static struct iris_hfi_device *__add_device(struct msm_cvp_platform_resources *res,
+ struct iris_hfi_device *hdevice = NULL;
+ if (!res || !callback) {
+ dprintk(CVP_ERR, "Invalid Parameters\n");
+ hdevice = kzalloc(sizeof(*hdevice), GFP_KERNEL);
+ if (!hdevice) {
+ dprintk(CVP_ERR, "failed to allocate new device\n");
+ hdevice->response_pkt = kmalloc_array(cvp_max_packets,
+ sizeof(*hdevice->response_pkt), GFP_KERNEL);
+ if (!hdevice->response_pkt) {
+ dprintk(CVP_ERR, "failed to allocate response_pkt\n");
+ goto err_cleanup;
+ hdevice->raw_packet =
+ kzalloc(CVP_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
+ if (!hdevice->raw_packet) {
+ dprintk(CVP_ERR, "failed to allocate raw packet\n");
+ rc = vm_manager.vm_ops->vm_init_reg_and_irq(hdevice, res);
+ hdevice->res = res;
+ hdevice->callback = callback;
+ __init_cvp_ops(hdevice);
+ hdevice->cvp_workq = create_singlethread_workqueue(
+ "msm_cvp_workerq_iris");
+ if (!hdevice->cvp_workq) {
+ dprintk(CVP_ERR, ": create cvp workq failed\n");
+ hdevice->iris_pm_workq = create_singlethread_workqueue(
+ "pm_workerq_iris");
+ if (!hdevice->iris_pm_workq) {
+ dprintk(CVP_ERR, ": create pm workq failed\n");
+ mutex_init(&hdevice->lock);
+ INIT_LIST_HEAD(&hdevice->sess_head);
+ return hdevice;
+err_cleanup:
+ if (hdevice->iris_pm_workq)
+ destroy_workqueue(hdevice->iris_pm_workq);
+ if (hdevice->cvp_workq)
+ destroy_workqueue(hdevice->cvp_workq);
+ kfree(hdevice->response_pkt);
+ kfree(hdevice->raw_packet);
+ kfree(hdevice);
+static struct iris_hfi_device *__get_device(struct msm_cvp_platform_resources *res,
+ dprintk(CVP_ERR, "Invalid params: %pK %pK\n", res, callback);
+ return __add_device(res, callback);
+void cvp_iris_hfi_delete_device(void *device)
+ struct iris_hfi_device *dev = NULL;
+ dev = core->dev_ops->hfi_device_data;
+ mutex_destroy(&dev->lock);
+ destroy_workqueue(dev->cvp_workq);
+ destroy_workqueue(dev->iris_pm_workq);
+ free_irq(dev->cvp_hal_data->irq, dev);
+ iounmap(dev->cvp_hal_data->register_base);
+ iounmap(dev->cvp_hal_data->gcc_reg_base);
+ kfree(dev->cvp_hal_data);
+ kfree(dev->response_pkt);
+ kfree(dev->raw_packet);
+ kfree(dev);
+static int iris_hfi_validate_session(void *sess, const char *func)
+ dprintk(CVP_ERR, " %s Invalid Params %pK\n", __func__, session);
+ if (!__is_session_valid(device, session, func))
+static void iris_init_hfi_callbacks(struct cvp_hfi_ops *ops_tbl)
+ ops_tbl->core_init = iris_hfi_core_init;
+ ops_tbl->core_release = iris_hfi_core_release;
+ ops_tbl->core_trigger_ssr = iris_hfi_core_trigger_ssr;
+ ops_tbl->session_init = iris_hfi_session_init;
+ ops_tbl->session_end = iris_hfi_session_end;
+ ops_tbl->session_start = iris_hfi_session_start;
+ ops_tbl->session_stop = iris_hfi_session_stop;
+ ops_tbl->session_abort = iris_hfi_session_abort;
+ ops_tbl->session_clean = iris_hfi_session_clean;
+ ops_tbl->session_set_buffers = iris_hfi_session_set_buffers;
+ ops_tbl->session_release_buffers = iris_hfi_session_release_buffers;
+ ops_tbl->session_send = iris_hfi_session_send;
+ ops_tbl->session_flush = iris_hfi_session_flush;
+ ops_tbl->scale_clocks = iris_hfi_scale_clocks;
+ ops_tbl->vote_bus = iris_hfi_vote_buses;
+ ops_tbl->get_fw_info = iris_hfi_get_fw_info;
+ ops_tbl->get_core_capabilities = iris_hfi_get_core_capabilities;
+ ops_tbl->suspend = iris_hfi_suspend;
+ ops_tbl->resume = iris_hfi_resume;
+ ops_tbl->flush_debug_queue = iris_hfi_flush_debug_queue;
+ ops_tbl->noc_error_info = iris_hfi_noc_error_info;
+ ops_tbl->validate_session = iris_hfi_validate_session;
+ ops_tbl->pm_qos_update = iris_pm_qos_update;
+ ops_tbl->debug_hook = iris_debug_hook;
+int cvp_iris_hfi_initialize(struct cvp_hfi_ops *ops_tbl,
+ if (!ops_tbl || !res || !callback) {
+ dprintk(CVP_ERR, "Invalid params: %pK %pK %pK\n",
+ ops_tbl, res, callback);
+ goto err_iris_hfi_init;
+ ops_tbl->hfi_device_data = __get_device(res, callback);
+ if (IS_ERR_OR_NULL(ops_tbl->hfi_device_data)) {
+ rc = PTR_ERR(ops_tbl->hfi_device_data) ?: -EINVAL;
+ iris_init_hfi_callbacks(ops_tbl);
+err_iris_hfi_init:
+static void dump_noc_reg(struct iris_hfi_device *device)
+ u32 val = 0, config;
+ if (msm_cvp_fw_low_power_mode) {
+ if (strcmp(rinfo->name, "cvp-core"))
+ "%s, Failed to acquire regulator control: %s\n",
+ dprintk(CVP_ERR, "%s, CVP_CC_MVS1_GDSCR: 0x%x", __func__, val);
+ dprintk(CVP_ERR, "%s, CVP_WRAPPER_CORE_CLOCK_CONFIG: 0x%x", __func__, config);
+ val = __read_register(device, CVP_NOC_RGE_NIU_DECCTL_LOW
+ + device->res->qos_noc_rge_niu_offset);
+ dprintk(CVP_ERR, "CVP_NOC_RGE_NIU_DECCTL_LOW: 0x%x", val);
+ val = __read_register(device, CVP_NOC_RGE_NIU_ENCCTL_LOW
+ dprintk(CVP_ERR, "CVP_NOC_RGE_NIU_ENCCTL_LOW: 0x%x", val);
+ val = __read_register(device, CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW
+ + device->res->qos_noc_gce_vadl_tof_niu_offset);
+ dprintk(CVP_ERR, "CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW: 0x%x", val);
+ val = __read_register(device, CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW
+ dprintk(CVP_ERR, "CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW: 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS
+ + device->res->noc_core_err_offset);
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS: 0x%x", val);
+ val = __read_register(device, CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW
+ + device->res->noc_main_sidebandmanager_offset);
+ dprintk(CVP_ERR, "CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW: 0x%x", val);
+ dprintk(CVP_ERR, "Dumping Core NoC registers\n");
+ val = __read_register(device, CVP_NOC_CORE_ERR_SWID_LOW_OFFS
+ dprintk(CVP_ERR, "CVP_NOC__CORE_ERL_MAIN_SWID_LOW: 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_SWID_HIGH_OFFS
+ dprintk(CVP_ERR, "CVVP_NOC_CORE_ERL_MAIN_SWID_HIGH 0x%x", val);
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_MAINCTL_LOW 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRVLD_LOW 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRCLR_LOW 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_LOW 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG0_HIGH 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_LOW 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG1_HIGH 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_LOW 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG2_HIGH 0x%x", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS
+ dprintk(CVP_ERR, "CORE ERRLOG3_LOW 0x%x, below details", val);
+ val = __read_register(device, CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS
+ dprintk(CVP_ERR, "CVP_NOC_CORE_ERL_MAIN_ERRLOG3_HIGH 0x%x", val);
+ __write_register(device, CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS
+ + device->res->noc_core_err_offset, 0x1);
@@ -0,0 +1,390 @@
+#ifndef __H_CVP_HFI_H__
+#define __H_CVP_HFI_H__
+#define HFI_CMD_SESSION_CVP_START \
+ (HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET + \
+ HFI_CMD_START_OFFSET + 0x1000)
+#define HFI_CMD_SESSION_CVP_SET_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x001)
+#define HFI_CMD_SESSION_CVP_RELEASE_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x002)
+#define HFI_CMD_SESSION_CVP_DS\
+ (HFI_CMD_SESSION_CVP_START + 0x003)
+#define HFI_CMD_SESSION_CVP_HCD_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x004)
+#define HFI_CMD_SESSION_CVP_HCD_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x005)
+#define HFI_CMD_SESSION_CVP_CV_HOG_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x006)
+#define HFI_CMD_SESSION_CVP_CV_HOG_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x007)
+#define HFI_CMD_SESSION_CVP_SVM\
+ (HFI_CMD_SESSION_CVP_START + 0x008)
+#define HFI_CMD_SESSION_CVP_NCC_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x009)
+#define HFI_CMD_SESSION_CVP_NCC_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x00A)
+#define HFI_CMD_SESSION_CVP_DFS_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x00B)
+#define HFI_CMD_SESSION_CVP_DFS_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x00C)
+#define HFI_CMD_SESSION_CVP_FTEXT\
+ (HFI_CMD_SESSION_CVP_START + 0x00F)
+/* ==========CHAINED OPERATIONS===================*/
+#define HFI_CMD_SESSION_CVP_CV_HOG_SVM_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x010)
+#define HFI_CMD_SESSION_CVP_CV_HOG_SVM_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x011)
+#define HFI_CMD_SESSION_CVP_CV_HOG_SVM_HCD_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x012)
+#define HFI_CMD_SESSION_CVP_CV_HOG_SVM_HCD_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x013)
+#define HFI_CMD_SESSION_CVP_OPTICAL_FLOW\
+ (HFI_CMD_SESSION_CVP_START + 0x014)
+/* ===========USECASE OPERATIONS===============*/
+#define HFI_CMD_SESSION_CVP_DC_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x030)
+#define HFI_CMD_SESSION_CVP_DC_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x031)
+#define HFI_CMD_SESSION_CVP_DCM_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x034)
+#define HFI_CMD_SESSION_CVP_DCM_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x035)
+#define HFI_CMD_SESSION_CVP_DME_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x039)
+#define HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x03B)
+#define HFI_CMD_SESSION_CVP_DME_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x03A)
+#define HFI_CMD_SESSION_EVA_DME_ONLY_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x040)
+#define HFI_CMD_SESSION_EVA_DME_ONLY_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x041)
+#define HFI_CMD_SESSION_CVP_CV_TME_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x047)
+#define HFI_CMD_SESSION_CVP_CV_TME_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x048)
+#define HFI_CMD_SESSION_CVP_CV_OD_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x049)
+#define HFI_CMD_SESSION_CVP_CV_OD_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x04A)
+#define HFI_CMD_SESSION_CVP_CV_ODT_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x04B)
+#define HFI_CMD_SESSION_CVP_CV_ODT_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x04C)
+#define HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x04D)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x050)
+#define HFI_CMD_SESSION_CVP_PYS_HCD_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x051)
+#define HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x052)
+#define HFI_CMD_SESSION_CVP_FD_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x053)
+#define HFI_CMD_SESSION_CVP_FD_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x054)
+#define HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x055)
+#define HFI_CMD_SESSION_CVP_RELEASE_MODEL_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x056)
+#define HFI_CMD_SESSION_CVP_SGM_DFS_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x057)
+#define HFI_CMD_SESSION_CVP_SGM_DFS_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x058)
+#define HFI_CMD_SESSION_CVP_SGM_OF_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x059)
+#define HFI_CMD_SESSION_CVP_SGM_OF_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x05A)
+#define HFI_CMD_SESSION_CVP_GCE_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x05B)
+#define HFI_CMD_SESSION_CVP_GCE_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x05C)
+#define HFI_CMD_SESSION_CVP_WARP_NCC_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x05D)
+#define HFI_CMD_SESSION_CVP_WARP_NCC_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x05E)
+#define HFI_CMD_SESSION_CVP_DMM_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x05F)
+#define HFI_CMD_SESSION_CVP_DMM_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x060)
+#define HFI_CMD_SESSION_CVP_FLUSH\
+ (HFI_CMD_SESSION_CVP_START + 0x061)
+#define HFI_CMD_SESSION_CVP_WARP_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x062)
+#define HFI_CMD_SESSION_CVP_WARP_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x063)
+#define HFI_CMD_SESSION_CVP_DMM_PARAMS\
+ (HFI_CMD_SESSION_CVP_START + 0x064)
+#define HFI_CMD_SESSION_CVP_WARP_DS_PARAMS\
+ (HFI_CMD_SESSION_CVP_START + 0x065)
+#define HFI_CMD_SESSION_CVP_XRA_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x066)
+#define HFI_CMD_SESSION_CVP_XRA_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x067)
+#define HFI_CMD_SESSION_CVP_XRA_BLOB_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x069)
+#define HFI_CMD_SESSION_CVP_XRA_BLOB_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x06A)
+#define HFI_CMD_SESSION_CVP_XRA_PATCH_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x06B)
+#define HFI_CMD_SESSION_CVP_XRA_PATCH_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x06C)
+#define HFI_CMD_SESSION_CVP_XRA_MATCH_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x06D)
+#define HFI_CMD_SESSION_CVP_XRA_MATCH_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x06E)
+#define HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x070)
+#define HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS\
+ (HFI_CMD_SESSION_CVP_START + 0x071)
+#define HFI_CMD_SESSION_CVP_SNAPSHOT_WRITE_DONE\
+ (HFI_CMD_SESSION_CVP_START + 0x072)
+#define HFI_CMD_SESSION_CVP_SET_SNAPSHOT_MODE\
+ (HFI_CMD_SESSION_CVP_START + 0x073)
+#define HFI_CMD_SESSION_EVA_ITOF_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x078)
+#define HFI_CMD_SESSION_EVA_ITOF_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x079)
+#define HFI_CMD_SESSION_EVA_DLFD_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x07C)
+#define HFI_CMD_SESSION_EVA_DLFD_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x07D)
+#define HFI_CMD_SESSION_CVP_RGE_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x07E)
+#define HFI_CMD_SESSION_CVP_RGE_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x07F)
+#define HFI_CMD_SESSION_EVA_DLFL_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x080)
+#define HFI_CMD_SESSION_EVA_DLFL_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x081)
+#define HFI_CMD_SESSION_CVP_SYNX\
+ (HFI_CMD_SESSION_CVP_START + 0x086)
+#define HFI_CMD_SESSION_EVA_START\
+ (HFI_CMD_SESSION_CVP_START + 0x088)
+#define HFI_CMD_SESSION_EVA_STOP\
+ (HFI_CMD_SESSION_CVP_START + 0x089)
+#define HFI_CMD_SESSION_CVP_ICA_FRAME\
+ (HFI_CMD_SESSION_CVP_START + 0x100)
+#define HFI_CMD_SESSION_CVP_ICA_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x101)
+#define HFI_CMD_SESSION_CVP_DS_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x02F)
+#define HFI_MSG_SESSION_CVP_START \
+ (HFI_DOMAIN_BASE_CVP + HFI_ARCH_COMMON_OFFSET + \
+ HFI_MSG_START_OFFSET + 0x1000)
+#define HFI_MSG_SESSION_CVP_SET_BUFFERS\
+ (HFI_MSG_SESSION_CVP_START + 0x001)
+#define HFI_MSG_SESSION_CVP_RELEASE_BUFFERS \
+ (HFI_MSG_SESSION_CVP_START + 0x002)
+#define HFI_MSG_SESSION_CVP_DS\
+ (HFI_MSG_SESSION_CVP_START + 0x003)
+#define HFI_MSG_SESSION_CVP_HCD\
+ (HFI_MSG_SESSION_CVP_START + 0x004)
+#define HFI_MSG_SESSION_CVP_CV_HOG\
+ (HFI_MSG_SESSION_CVP_START + 0x005)
+#define HFI_MSG_SESSION_CVP_SVM\
+ (HFI_MSG_SESSION_CVP_START + 0x006)
+#define HFI_MSG_SESSION_CVP_NCC\
+ (HFI_MSG_SESSION_CVP_START + 0x007)
+#define HFI_MSG_SESSION_CVP_DFS\
+ (HFI_MSG_SESSION_CVP_START + 0x008)
+#define HFI_MSG_SESSION_CVP_TME\
+ (HFI_MSG_SESSION_CVP_START + 0x009)
+#define HFI_MSG_SESSION_CVP_FTEXT\
+ (HFI_MSG_SESSION_CVP_START + 0x00A)
+#define HFI_MSG_SESSION_CVP_ICA\
+ (HFI_MSG_SESSION_CVP_START + 0x014)
+#define HFI_MSG_SESSION_CVP_DME\
+ (HFI_MSG_SESSION_CVP_START + 0x023)
+#define HFI_MSG_SESSION_EVA_DME_ONLY\
+ (HFI_MSG_SESSION_CVP_START + 0x050)
+#define HFI_MSG_SESSION_CVP_OPERATION_CONFIG (HFI_MSG_SESSION_CVP_START + 0x030)
+#define HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS\
+ (HFI_MSG_SESSION_CVP_START + 0x034)
+#define HFI_MSG_SESSION_CVP_SET_MODEL_BUFFERS\
+ (HFI_MSG_SESSION_CVP_START + 0x036)
+#define HFI_MSG_SESSION_CVP_FD\
+ (HFI_MSG_SESSION_CVP_START + 0x037)
+#define HFI_MSG_SESSION_CVP_RELEASE_PERSIST_BUFFERS\
+ (HFI_MSG_SESSION_CVP_START + 0x038)
+#define HFI_MSG_SESSION_CVP_RELEASE_MODEL_BUFFERS\
+ (HFI_MSG_SESSION_CVP_START + 0x039)
+#define HFI_MSG_SESSION_CVP_SGM_OF\
+ (HFI_MSG_SESSION_CVP_START + 0x03A)
+#define HFI_MSG_SESSION_CVP_GCE\
+ (HFI_MSG_SESSION_CVP_START + 0x03B)
+#define HFI_MSG_SESSION_CVP_WARP_NCC\
+ (HFI_MSG_SESSION_CVP_START + 0x03C)
+#define HFI_MSG_SESSION_CVP_DMM\
+ (HFI_MSG_SESSION_CVP_START + 0x03D)
+#define HFI_MSG_SESSION_CVP_SGM_DFS\
+ (HFI_MSG_SESSION_CVP_START + 0x03E)
+#define HFI_MSG_SESSION_CVP_WARP\
+ (HFI_MSG_SESSION_CVP_START + 0x03F)
+#define HFI_MSG_SESSION_CVP_DMM_PARAMS\
+ (HFI_MSG_SESSION_CVP_START + 0x040)
+#define HFI_MSG_SESSION_CVP_WARP_DS_PARAMS\
+ (HFI_MSG_SESSION_CVP_START + 0x041)
+#define HFI_MSG_SESSION_CVP_SET_SNAPSHOT_BUFFERS\
+ (HFI_MSG_SESSION_CVP_START + 0x045)
+#define HFI_MSG_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS\
+ (HFI_MSG_SESSION_CVP_START + 0x046)
+#define HFI_MSG_EVENT_NOTIFY_SNAPSHOT_READY\
+ (HFI_MSG_SESSION_CVP_START + 0x047)
+#define HFI_MSG_SESSION_CVP_FLUSH\
+ (HFI_MSG_SESSION_CVP_START + 0x004A)
+#define HFI_MSG_SESSION_EVA_START\
+ (HFI_MSG_SESSION_CVP_START + 0x0058)
+#define HFI_MSG_SESSION_EVA_STOP\
+ (HFI_MSG_SESSION_CVP_START + 0x0059)
+#define CVP_IFACEQ_MAX_PKT_SIZE 1024
+#define CVP_IFACEQ_MED_PKT_SIZE 768
+#define CVP_IFACEQ_MIN_PKT_SIZE 8
+#define CVP_IFACEQ_VAR_SMALL_PKT_SIZE 100
+#define CVP_IFACEQ_VAR_LARGE_PKT_SIZE 512
+#define CVP_IFACEQ_VAR_HUGE_PKT_SIZE (1024*12)
+/* HFI packet info needed for sanity check */
+#define HFI_DFS_CONFIG_CMD_SIZE 38
+#define HFI_DFS_FRAME_CMD_SIZE 16
+#define HFI_DMM_CONFIG_CMD_SIZE 194
+#define HFI_DMM_BASIC_CONFIG_CMD_SIZE 51
+#define HFI_DMM_FRAME_CMD_SIZE 28
+#define HFI_PERSIST_CMD_SIZE 11
+#define HFI_DS_CONFIG_CMD_SIZE 11
+#define HFI_DS_CMD_SIZE 50
+#define HFI_OF_CONFIG_CMD_SIZE 34
+#define HFI_OF_FRAME_CMD_SIZE 24
+#define HFI_ODT_CONFIG_CMD_SIZE 23
+#define HFI_ODT_FRAME_CMD_SIZE 33
+#define HFI_OD_CONFIG_CMD_SIZE 24
+#define HFI_OD_FRAME_CMD_SIZE 12
+#define HFI_NCC_CONFIG_CMD_SIZE 47
+#define HFI_NCC_FRAME_CMD_SIZE 22
+#define HFI_ICA_CONFIG_CMD_SIZE 127
+#define HFI_ICA_FRAME_CMD_SIZE 14
+#define HFI_HCD_CONFIG_CMD_SIZE 46
+#define HFI_HCD_FRAME_CMD_SIZE 18
+#define HFI_DCM_CONFIG_CMD_SIZE 20
+#define HFI_DCM_FRAME_CMD_SIZE 19
+#define HFI_PYS_HCD_CONFIG_CMD_SIZE 461
+#define HFI_PYS_HCD_FRAME_CMD_SIZE 66
+#define HFI_FD_CONFIG_CMD_SIZE 28
+#define HFI_FD_FRAME_CMD_SIZE 10
+struct cvp_hfi_cmd_session_flush_packet {
+ u32 packet_type;
+ u32 session_id;
+ u32 flush_type;
+struct cvp_hfi_cmd_session_get_property_packet {
+ u32 num_properties;
+ u32 rg_property_data[1];
+struct cvp_hfi_msg_sys_session_abort_done_packet {
+ u32 error_type;
+struct cvp_hfi_msg_sys_property_info_packet {
+ u32 rg_property_data[128];
+enum session_flags {
+ SESSION_PAUSE = BIT(1),
+struct cvp_hal_session {
+ void *session_id;
+ u32 flags;
+ void *device;
+enum buf_map_type {
+ MAP_PERSIST = 1,
+ UNMAP_PERSIST = 2,
+ MAP_FRAME = 3,
+ MAP_INVALID,
+static inline enum buf_map_type cvp_find_map_type(int pkt_type)
+ if (pkt_type == HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS ||
+ pkt_type == HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS ||
+ pkt_type == HFI_CMD_SESSION_CVP_DMM_PARAMS ||
+ pkt_type == HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS ||
+ pkt_type == HFI_CMD_SESSION_CVP_WARP_DS_PARAMS ||
+ pkt_type == HFI_CMD_SESSION_EVA_DLFL_CONFIG)
+ return MAP_PERSIST;
+ else if (pkt_type == HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS ||
+ pkt_type ==
+ HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS)
+ return UNMAP_PERSIST;
+ return MAP_FRAME;
+static inline bool is_params_pkt(int pkt_type)
+ if (pkt_type == HFI_CMD_SESSION_CVP_DMM_PARAMS ||
+ pkt_type == HFI_CMD_SESSION_CVP_WARP_DS_PARAMS)
@@ -0,0 +1,317 @@
+#ifndef __CVP_HFI_API_H__
+#define __CVP_HFI_API_H__
+#include <linux/log2.h>
+#include <linux/errno.h>
+#define CONTAINS(__a, __sz, __t) (\
+ (__t >= __a) && \
+ (__t < __a + __sz) \
+)
+#define OVERLAPS(__t, __tsz, __a, __asz) (\
+ (__t <= __a) && \
+ (__t + __tsz >= __a + __asz) \
+)
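+/*
+ * Editorial sketch, not part of this patch: CONTAINS() is true when address
+ * __t lies inside the region [__a, __a + __sz), while OVERLAPS() is true when
+ * the region starting at __t (of size __tsz) fully covers the region starting
+ * at __a (of size __asz). The helper below only illustrates intended usage;
+ * its name and parameters are hypothetical.
+ */
+static inline bool example_pkt_in_queue(u32 queue_base, u32 queue_size, u32 pkt_addr)
+{
+ return CONTAINS(queue_base, queue_size, pkt_addr);
+}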
+#define CVP_VERSION_LENGTH 128
+/* 16 encoder and 16 decoder sessions */
+#define CVP_MAX_SESSIONS 32
+#define HFI_VERSION_MAJOR_MASK 0xFF000000
+#define HFI_VERSION_MAJOR_SHFIT 24
+#define HFI_VERSION_MINOR_MASK 0x00FFFFE0
+#define HFI_VERSION_MINOR_SHIFT 5
+#define HFI_VERSION_BRANCH_MASK 0x0000001F
+#define HFI_VERSION_BRANCH_SHIFT 0
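+/*
+ * Editorial sketch, not part of this patch: a firmware version word packs
+ * major/minor/branch fields per the masks and shifts above (note the major
+ * shift macro is spelled "SHFIT" in this header). A hypothetical decode
+ * helper:
+ */
+static inline void example_decode_hfi_version(u32 ver, u32 *major, u32 *minor, u32 *branch)
+{
+ *major = (ver & HFI_VERSION_MAJOR_MASK) >> HFI_VERSION_MAJOR_SHFIT;
+ *minor = (ver & HFI_VERSION_MINOR_MASK) >> HFI_VERSION_MINOR_SHIFT;
+ *branch = (ver & HFI_VERSION_BRANCH_MASK) >> HFI_VERSION_BRANCH_SHIFT;
+}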
+enum cvp_status {
+ CVP_ERR_NONE = 0x0,
+ CVP_ERR_FAIL = 0x80000000,
+ CVP_ERR_ALLOC_FAIL,
+ CVP_ERR_ILLEGAL_OP,
+ CVP_ERR_BAD_PARAM,
+ CVP_ERR_BAD_HANDLE,
+ CVP_ERR_NOT_SUPPORTED,
+ CVP_ERR_BAD_STATE,
+ CVP_ERR_MAX_CLIENTS,
+ CVP_ERR_IFRAME_EXPECTED,
+ CVP_ERR_HW_FATAL,
+ CVP_ERR_BITSTREAM_ERR,
+ CVP_ERR_INDEX_NOMORE,
+ CVP_ERR_SEQHDR_PARSE_FAIL,
+ CVP_ERR_INSUFFICIENT_BUFFER,
+ CVP_ERR_BAD_POWER_STATE,
+ CVP_ERR_NO_VALID_SESSION,
+ CVP_ERR_TIMEOUT,
+ CVP_ERR_CMDQFULL,
+ CVP_ERR_START_CODE_NOT_FOUND,
+ CVP_ERR_NOC_ERROR,
+ CVP_ERR_CLIENT_PRESENT = 0x90000001,
+ CVP_ERR_CLIENT_FATAL,
+ CVP_ERR_CMD_QUEUE_FULL,
+ CVP_ERR_UNUSED = 0x10000000
+enum hal_property {
+ HAL_UNUSED_PROPERTY = 0xFFFFFFFF,
+enum hal_ssr_trigger_type {
+ SSR_ERR_FATAL = 1,
+ SSR_SW_DIV_BY_ZERO,
+ SSR_HW_WDOG_IRQ,
+ SSR_SESSION_ABORT,
+enum hal_intra_refresh_mode {
+ HAL_INTRA_REFRESH_NONE,
+ HAL_INTRA_REFRESH_CYCLIC,
+ HAL_INTRA_REFRESH_RANDOM,
+ HAL_UNUSED_INTRA = 0x10000000,
+enum cvp_resource_id {
+ CVP_RESOURCE_NONE,
+ CVP_RESOURCE_SYSCACHE,
+ CVP_UNUSED_RESOURCE = 0x10000000,
+struct cvp_resource_hdr {
+ enum cvp_resource_id resource_id;
+ void *resource_handle;
+struct cvp_hal_fw_info {
+ char version[CVP_VERSION_LENGTH];
+ phys_addr_t base_addr;
+ int register_base;
+ int register_size;
+ int irq;
+enum hal_event_type {
+ HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES,
+ HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES,
+ HAL_EVENT_RELEASE_BUFFER_REFERENCE,
+ HAL_UNUSED_SEQCHG = 0x10000000,
+/* HAL Response */
+#define IS_HAL_SYS_CMD(cmd) ((cmd) >= HAL_SYS_INIT_DONE && \
+ (cmd) <= HAL_SYS_ERROR)
+#define IS_HAL_SESSION_CMD(cmd) ((cmd) >= HAL_SESSION_EVENT_CHANGE && \
+ (cmd) <= HAL_SESSION_ERROR)
+enum hal_command_response {
+ HAL_NO_RESP,
+ HAL_SYS_INIT_DONE,
+ HAL_SYS_SET_RESOURCE_DONE,
+ HAL_SYS_RELEASE_RESOURCE_DONE,
+ HAL_SYS_PING_ACK_DONE,
+ HAL_SYS_PC_PREP_DONE,
+ HAL_SYS_IDLE,
+ HAL_SYS_DEBUG,
+ HAL_SYS_WATCHDOG_TIMEOUT,
+ HAL_SYS_ERROR,
+ /* SESSION COMMANDS_DONE */
+ HAL_SESSION_EVENT_CHANGE,
+ HAL_SESSION_INIT_DONE,
+ HAL_SESSION_END_DONE,
+ HAL_SESSION_SET_BUFFER_DONE,
+ HAL_SESSION_ABORT_DONE,
+ HAL_SESSION_START_DONE,
+ HAL_SESSION_STOP_DONE,
+ HAL_SESSION_CVP_OPERATION_CONFIG,
+ HAL_SESSION_FLUSH_DONE,
+ HAL_SESSION_SUSPEND_DONE,
+ HAL_SESSION_RESUME_DONE,
+ HAL_SESSION_SET_PROP_DONE,
+ HAL_SESSION_GET_PROP_DONE,
+ HAL_SESSION_RELEASE_BUFFER_DONE,
+ HAL_SESSION_REGISTER_BUFFER_DONE,
+ HAL_SESSION_UNREGISTER_BUFFER_DONE,
+ HAL_SESSION_RELEASE_RESOURCE_DONE,
+ HAL_SESSION_PROPERTY_INFO,
+ HAL_SESSION_DUMP_NOTIFY,
+ HAL_SESSION_ERROR,
+ HAL_RESPONSE_UNUSED = 0x10000000,
+struct msm_cvp_capability {
+ u32 reserved[183];
+struct cvp_hal_sys_init_done {
+ u32 dec_codec_supported;
+ u32 enc_codec_supported;
+ u32 codec_count;
+ struct msm_cvp_capability *capabilities;
+ u32 max_sessions_supported;
+struct cvp_hal_session_init_done {
+ struct msm_cvp_capability capability;
+struct msm_cvp_cb_cmd_done {
+ u32 device_id;
+ enum cvp_status status;
+ struct cvp_hfi_msg_session_hdr msg_hdr;
+ struct cvp_resource_hdr resource_hdr;
+ struct cvp_hal_sys_init_done sys_init_done;
+ struct cvp_hal_session_init_done session_init_done;
+ u32 buffer_addr;
+struct msm_cvp_cb_data_done {
+ u32 client_data;
+struct msm_cvp_cb_info {
+ enum hal_command_response response_type;
+ struct msm_cvp_cb_cmd_done cmd;
+ struct msm_cvp_cb_data_done data;
+ } response;
+enum msm_cvp_hfi_type {
+ CVP_HFI_IRIS,
+enum msm_cvp_thermal_level {
+ CVP_THERMAL_NORMAL = 0,
+ CVP_THERMAL_LOW,
+ CVP_THERMAL_HIGH,
+ CVP_THERMAL_CRITICAL
+struct msm_cvp_gov_data {
+ struct cvp_bus_vote_data *data;
+ u32 data_count;
+enum msm_cvp_power_mode {
+ CVP_POWER_NORMAL = 0,
+ CVP_POWER_LOW,
+ CVP_POWER_TURBO
+struct cvp_bus_vote_data {
+ u32 domain;
+ u32 ddr_bw;
+ u32 sys_cache_bw;
+ enum msm_cvp_power_mode power_mode;
+ bool use_sys_cache;
+struct cvp_hal_cmd_sys_get_property_packet {
+#define call_hfi_op(q, op, args...) \
+ (((q) && (q)->op) ? ((q)->op(args)) : 0)
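+/*
+ * Editorial sketch, not part of this patch: call_hfi_op() evaluates to 0 when
+ * the ops table or the requested callback is NULL, so optional ops can be
+ * invoked without explicit checks. 'ops_tbl' is a hypothetical
+ * struct cvp_hfi_ops pointer (the struct is declared later in this header),
+ * e.g.:
+ *
+ * rc = call_hfi_op(ops_tbl, suspend, ops_tbl->hfi_device_data);
+ */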
+#define PKT_NAME_LEN 24
+#define MAX_PKT_IDX 0x200
+struct msm_cvp_hfi_defs {
+ unsigned int size;
+ unsigned int type;
+ bool is_config_pkt;
+ bool checksum_enabled;
+ enum hal_command_response resp;
+ char name[PKT_NAME_LEN];
+ bool force_kernel_fence;
+struct cvp_hfi_ops {
+ void *hfi_device_data;
+ /*Add function pointers for all the hfi functions below*/
+ int (*core_init)(void *device);
+ int (*core_release)(void *device);
+ int (*core_trigger_ssr)(void *device, enum hal_ssr_trigger_type);
+ int (*session_init)(void *device, void *session_id, void **new_session);
+ int (*session_end)(void *session);
+ int (*session_start)(void *session);
+ int (*session_stop)(void *session);
+ int (*session_abort)(void *session);
+ int (*session_set_buffers)(void *sess, u32 iova, u32 size);
+ int (*session_release_buffers)(void *sess);
+ int (*session_send)(void *sess, struct eva_kmd_hfi_packet *in_pkt);
+ int (*session_flush)(void *sess);
+ int (*scale_clocks)(void *dev, u32 freq);
+ int (*vote_bus)(void *dev, struct bus_info *bus, unsigned long bw);
+ int (*get_fw_info)(void *dev, struct cvp_hal_fw_info *fw_info);
+ int (*session_clean)(void *sess);
+ int (*get_core_capabilities)(void *dev);
+ int (*suspend)(void *dev);
+ int (*resume)(void *dev);
+ int (*flush_debug_queue)(void *dev);
+ int (*noc_error_info)(void *dev);
+ int (*validate_session)(void *sess, const char *func);
+ int (*pm_qos_update)(void *device);
+ int (*debug_hook)(void *device);
+typedef void (*hfi_cmd_response_callback) (enum hal_command_response cmd,
+ void *data);
+typedef void (*msm_cvp_callback) (enum hal_command_response response,
+ void *callback);
+struct msm_cvp_fw {
+ int cookie;
+int cvp_hfi_process_msg_packet(u32 device_id,
+ void *msg_hdr, struct msm_cvp_cb_info *info);
+enum cvp_status cvp_hfi_process_sys_init_done_prop_read(
+ struct cvp_hfi_msg_sys_init_done_packet *pkt,
+ struct cvp_hal_sys_init_done *sys_init_done);
+enum cvp_status hfi_process_session_init_done_prop_read(
+ struct cvp_hfi_msg_sys_session_init_done_packet *pkt,
+ struct cvp_hal_session_init_done *session_init_done);
+ struct cvp_hfi_ops *hdev);
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
+int get_pkt_fenceoverride(struct cvp_hal_session_cmd_pkt* hdr);
+int get_pkt_index_from_type(u32 pkt_type);
+int get_hfi_version(void);
+unsigned int get_msg_size(struct cvp_hfi_msg_session_hdr *hdr);
+unsigned int get_msg_session_id(void *msg);
+unsigned int get_msg_errorcode(void *msg);
+ unsigned int *error_type, unsigned int *config_id);
+extern const struct msm_cvp_hfi_defs cvp_hfi_defs[MAX_PKT_IDX];
+void print_hfi_queue_info(struct cvp_hfi_ops *hdev);
+#endif /*__CVP_HFI_API_H__ */
@@ -0,0 +1,511 @@
+#ifndef __H_CVP_HFI_HELPER_H__
+#define __H_CVP_HFI_HELPER_H__
+#define HFI_COMMON_BASE (0)
+#define HFI_DOMAIN_BASE_COMMON (HFI_COMMON_BASE + 0)
+#define HFI_DOMAIN_BASE_CVP (HFI_COMMON_BASE + 0x04000000)
+#define HFI_ARCH_COMMON_OFFSET (0)
+#define HFI_CMD_START_OFFSET (0x00010000)
+#define HFI_MSG_START_OFFSET (0x00020000)
+#define HFI_ERR_NONE (HFI_COMMON_BASE) /**< Status: No error */
+#define HFI_ERR_SYS_FATAL (HFI_COMMON_BASE + 0x1) /**< Fatal system error */
+#define HFI_ERR_SYS_INVALID_PARAMETER (HFI_COMMON_BASE + 0x2) /**< Invalid system parameter encountered */
+#define HFI_ERR_SYS_VERSION_MISMATCH (HFI_COMMON_BASE + 0x3) /**< Interface version mismatch */
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x4) /**< Insufficient system resources */
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED (HFI_COMMON_BASE + 0x5) /**< Maximum number of sessions reached */
+#define HFI_ERR_SYS_SESSION_IN_USE (HFI_COMMON_BASE + 0x7) /**< Session ID specified is in use */
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE (HFI_COMMON_BASE + 0x8) /**< ID is out of range */
+#define HFI_ERR_SYS_UNSUPPORTED_TRIGCMD (HFI_COMMON_BASE + 0xA) /**< Unsupported TRIGCMD command*/
+#define HFI_ERR_SYS_UNSUPPORTED_RESOURCES (HFI_COMMON_BASE + 0xB) /**< Unsupported resource*/
+#define HFI_ERR_SYS_UNSUPPORT_CMD (HFI_COMMON_BASE + 0xC) /**< Command is not supported*/
+#define HFI_ERR_SYS_CMDSIZE (HFI_COMMON_BASE + 0xD) /**< command size err*/
+#define HFI_ERR_SYS_UNSUPPORT_PROPERTY (HFI_COMMON_BASE + 0xE) /**< Unsupported property*/
+#define HFI_ERR_SYS_INIT_EXPECTED (HFI_COMMON_BASE + 0xF) /**< Upon FW start, first command must be SYS_INIT*/
+#define HFI_ERR_SYS_INIT_IGNORED (HFI_COMMON_BASE + 0x10) /**< After FW started, SYS_INIT will be ignored*/
+#define HFI_ERR_SYS_MAX_DME_SESSIONS_REACHED (HFI_COMMON_BASE + 0x11) /**< Maximum DME sessions Reached */
+#define HFI_ERR_SYS_MAX_FD_SESSIONS_REACHED (HFI_COMMON_BASE + 0x12) /**< Maximum FD sessions Reached */
+#define HFI_ERR_SYS_MAX_ODT_SESSIONS_REACHED (HFI_COMMON_BASE + 0x13) /**< Maximum ODT sessions Reached*/
+#define HFI_ERR_SYS_MAX_CV_SESSIONS_REACHED (HFI_COMMON_BASE + 0x14) /**< Maximum CV sessions Reached*/
+#define HFI_ERR_SYS_INVALID_SESSION_TYPE (HFI_COMMON_BASE + 0x15) /**< Invalid session TYPE. */
+#define HFI_ERR_SYS_NOC_ERROR (HFI_COMMON_BASE + 0x16) /**< NOC Error encountered */
+ /**
+ * Level 2 Comment: "Session Level Error types"
+ * Common HFI_ERROR_SESSION_X values to be used as session level error/warning
+ * for event and messages
+ */
+#define HFI_ERR_SESSION_FATAL (HFI_COMMON_BASE + 0x1001) /**< Fatal session error */
+#define HFI_ERR_SESSION_INVALID_PARAMETER (HFI_COMMON_BASE + 0x1002) /**< Invalid session parameter */
+#define HFI_ERR_SESSION_BAD_POINTER (HFI_COMMON_BASE + 0x1003) /**< Bad pointer encountered */
+#define HFI_ERR_SESSION_INVALID_SESSION_ID (HFI_COMMON_BASE + 0x1004) /**< Invalid session ID. eventData2 specifies the session ID. */
+#define HFI_ERR_SESSION_INVALID_STREAM_ID (HFI_COMMON_BASE + 0x1005) /**< Invalid stream ID. eventData2 specifies the stream ID. */
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION (HFI_COMMON_BASE + 0x1006) /**< Incorrect state for specified operation */
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY (HFI_COMMON_BASE + 0x1007) /**< Unsupported property. eventData2 specifies the property index. */
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING (HFI_COMMON_BASE + 0x1008) /**< Unsupported property setting. eventData2 specifies the property index. */
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES (HFI_COMMON_BASE + 0x1009) /**< Insufficient resources for session */
+#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED (HFI_COMMON_BASE + 0x100A) /**< Stream is found to be corrupt; processing is stalled */
+#define HFI_ERR_SESSION_STREAM_CORRUPT (HFI_COMMON_BASE + 0x100B) /**< Stream is found to be corrupt; processing is recoverable */
+#define HFI_ERR_SESSION_RESERVED (HFI_COMMON_BASE + 0x100C) /**< Reserved */
+#define HFI_ERR_SESSION_UNSUPPORTED_STREAM (HFI_COMMON_BASE + 0x100D) /**< Unsupported stream */
+#define HFI_ERR_SESSION_CMDSIZE (HFI_COMMON_BASE + 0x100E) /**< Command packet size err*/
+#define HFI_ERR_SESSION_UNSUPPORT_CMD (HFI_COMMON_BASE + 0x100F) /**< Command is not supported*/
+#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE (HFI_COMMON_BASE + 0x1010) /**< BufferType is not supported*/
+#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL (HFI_COMMON_BASE + 0x1011) /**< Buffer Count is less than default*/
+#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR (HFI_COMMON_BASE + 0x1012) /**< Downscaling not possible */
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED (HFI_COMMON_BASE + 0x1013) /**< Upscaling not possible */
+#define HFI_ERR_SESSION_CANNOT_KEEP_ASPECT_RATIO (HFI_COMMON_BASE + 0x1014) /**< Cannot maintain aspect ratio */
+#define HFI_ERR_SESSION_ADDRESS_NOT_ALIGNED (HFI_COMMON_BASE + 0x1016) /**< Address is not aligned */
+#define HFI_ERR_SESSION_BUFFERSIZE_TOOSMALL (HFI_COMMON_BASE + 0x1017) /**< Buffer Count is less than default*/
+#define HFI_ERR_SESSION_ABORTED (HFI_COMMON_BASE + 0x1018) /**< error caused by session abort*/
+#define HFI_ERR_SESSION_BUFFER_ALREADY_SET (HFI_COMMON_BASE + 0x1019) /**< Cannot set buffer multiple times without releasing in between. */
+#define HFI_ERR_SESSION_BUFFER_ALREADY_RELEASED (HFI_COMMON_BASE + 0x101A) /**< Cannot release buffer multiple times without setting in between. */
+#define HFI_ERR_SESSION_END_BUFFER_NOT_RELEASED (HFI_COMMON_BASE + 0x101B) /**< Session was ended without properly releasing all buffers */
+#define HFI_ERR_SESSION_FLUSHED (HFI_COMMON_BASE + 0x101C) /**< Cannot set buffer multiple times without releasing in between. */
+#define HFI_ERR_SESSION_KERNEL_MAX_STREAMS_REACHED (HFI_COMMON_BASE + 0x101D) /*Maximum Streams per Kernel reached in a session*/
+#define HFI_ERR_SESSION_MAX_STREAMS_REACHED (HFI_COMMON_BASE + 0x101E) /*Maximum Streams Reached in a session*/
+#define HFI_ERR_SESSION_HW_HANG_DETECTED (HFI_COMMON_BASE + 0x101F) /*HW hang was detected in one of the HW blocks for a frame*/
+#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_SESSION_ERROR (HFI_COMMON_BASE + 0x2)
+#define HFI_TME_PROFILE_DEFAULT 0x00000001
+#define HFI_TME_PROFILE_FRC 0x00000002
+#define HFI_TME_PROFILE_ASW 0x00000004
+#define HFI_TME_PROFILE_DFS_BOKEH 0x00000008
+#define HFI_TME_LEVEL_INTEGER 0x00000001
+#define HFI_BUFFER_INPUT (HFI_COMMON_BASE + 0x1)
+#define HFI_BUFFER_OUTPUT (HFI_COMMON_BASE + 0x2)
+#define HFI_BUFFER_OUTPUT2 (HFI_COMMON_BASE + 0x3)
+#define HFI_BUFFER_INTERNAL_PERSIST (HFI_COMMON_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_PERSIST_1 (HFI_COMMON_BASE + 0x5)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH (HFI_COMMON_BASE + 0x6)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1 (HFI_COMMON_BASE + 0x7)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2 (HFI_COMMON_BASE + 0x8)
+#define HFI_BUFFER_COMMON_INTERNAL_RECON (HFI_COMMON_BASE + 0x9)
+#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_COMMON_BASE + 0xA)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_COMMON_BASE + 0xB)
+#define HFI_BUFFER_EXTRADATA_INPUT (HFI_COMMON_BASE + 0xC)
+#define HFI_PROPERTY_SYS_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000)
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x001)
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x002)
+#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x003)
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x004)
+#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x005)
+#define HFI_PROPERTY_SYS_IMAGE_VERSION \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x006)
+#define HFI_PROPERTY_SYS_CONFIG_COVERAGE \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x007)
+#define HFI_PROPERTY_SYS_UBWC_CONFIG \
+ (HFI_PROPERTY_SYS_COMMON_START + 0x008)
+#define HFI_DEBUG_MSG_LOW 0x00000001
+#define HFI_DEBUG_MSG_MEDIUM 0x00000002
+#define HFI_DEBUG_MSG_HIGH 0x00000004
+#define HFI_DEBUG_MSG_ERROR 0x00000008
+#define HFI_DEBUG_MSG_FATAL 0x00000010
+#define HFI_DEBUG_MSG_PERF 0x00000020
+#define HFI_DEBUG_MODE_QUEUE 0x00000001
+#define HFI_DEBUG_MODE_QDSS 0x00000002
+struct cvp_hfi_debug_config {
+ u32 debug_config;
+ u32 debug_mode;
+struct cvp_hfi_enable {
+ u32 enable;
+#define HFI_RESOURCE_SYSCACHE 0x00000002
+struct cvp_hfi_resource_subcache_type {
+ u32 sc_id;
+struct cvp_hfi_resource_syscache_info_type {
+ u32 num_entries;
+ struct cvp_hfi_resource_subcache_type rg_subcache_entries[1];
+#define HFI_CMD_SYS_COMMON_START \
+(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
+ + 0x0000)
+#define HFI_CMD_SYS_INIT (HFI_CMD_SYS_COMMON_START + 0x001)
+#define HFI_CMD_SYS_PC_PREP (HFI_CMD_SYS_COMMON_START + 0x002)
+#define HFI_CMD_SYS_SET_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x003)
+#define HFI_CMD_SYS_RELEASE_RESOURCE (HFI_CMD_SYS_COMMON_START + 0x004)
+#define HFI_CMD_SYS_SET_PROPERTY (HFI_CMD_SYS_COMMON_START + 0x005)
+#define HFI_CMD_SYS_GET_PROPERTY (HFI_CMD_SYS_COMMON_START + 0x006)
+#define HFI_CMD_SYS_SESSION_INIT (HFI_CMD_SYS_COMMON_START + 0x007)
+#define HFI_CMD_SYS_SESSION_END (HFI_CMD_SYS_COMMON_START + 0x008)
+#define HFI_CMD_SYS_SET_BUFFERS (HFI_CMD_SYS_COMMON_START + 0x009)
+#define HFI_CMD_SYS_SESSION_ABORT (HFI_CMD_SYS_COMMON_START + 0x00A)
+#define HFI_CMD_SYS_TEST_START (HFI_CMD_SYS_COMMON_START + 0x100)
+#define HFI_MSG_SYS_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + \
+ HFI_MSG_START_OFFSET + 0x0000)
+#define HFI_MSG_SYS_INIT_DONE (HFI_MSG_SYS_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE (HFI_MSG_SYS_COMMON_START + 0x2)
+#define HFI_MSG_SYS_RELEASE_RESOURCE (HFI_MSG_SYS_COMMON_START + 0x3)
+#define HFI_MSG_SYS_DEBUG (HFI_MSG_SYS_COMMON_START + 0x4)
+#define HFI_MSG_SYS_SESSION_INIT_DONE (HFI_MSG_SYS_COMMON_START + 0x6)
+#define HFI_MSG_SYS_SESSION_END_DONE (HFI_MSG_SYS_COMMON_START + 0x7)
+#define HFI_MSG_SYS_IDLE (HFI_MSG_SYS_COMMON_START + 0x8)
+#define HFI_MSG_SYS_COV (HFI_MSG_SYS_COMMON_START + 0x9)
+#define HFI_MSG_SYS_PROPERTY_INFO (HFI_MSG_SYS_COMMON_START + 0xA)
+#define HFI_MSG_SYS_SESSION_ABORT_DONE (HFI_MSG_SYS_COMMON_START + 0xC)
+#define HFI_MSG_SESSION_SYNC_DONE (HFI_MSG_SESSION_OX_START + 0xD)
+#define HFI_MSG_SESSION_COMMON_START \
+ (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + \
+ HFI_MSG_START_OFFSET + 0x1000)
+#define HFI_MSG_EVENT_NOTIFY (HFI_MSG_SESSION_COMMON_START + 0x1)
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER_DONE \
+ (HFI_MSG_SESSION_COMMON_START + 0x2)
+#define HFI_CMD_SYS_TEST_SSR (HFI_CMD_SYS_TEST_START + 0x1)
+#define HFI_TEST_SSR_SW_ERR_FATAL 0x1
+#define HFI_TEST_SSR_SW_DIV_BY_ZERO 0x2
+#define HFI_TEST_SSR_HW_WDOG_IRQ 0x3
+struct cvp_hal_cmd_pkt_hdr {
+struct cvp_hal_msg_pkt_hdr {
+ u32 packet;
+struct cvp_hal_session_cmd_pkt {
+struct cvp_hfi_cmd_sys_init_packet {
+ u32 arch_type;
+struct cvp_hfi_cmd_sys_pc_prep_packet {
+struct cvp_hfi_cmd_sys_set_resource_packet {
+ u32 resource_handle;
+ u32 resource_type;
+ u32 rg_resource_data[1];
+struct cvp_hfi_cmd_sys_release_resource_packet {
+struct cvp_hfi_cmd_sys_set_property_packet {
+struct cvp_hfi_cmd_sys_get_property_packet {
+struct cvp_hfi_cmd_sys_session_init_packet {
+ u32 session_type;
+ u32 session_kmask;
+ u32 session_prio;
+ u32 is_secure;
+ u32 dsp_ac_mask;
+struct cvp_hfi_cmd_sys_session_end_packet {
+struct cvp_hfi_cmd_sys_set_buffers_packet {
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 num_buffers;
+ u32 rg_buffer_addr[1];
+struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type {
+ struct {
+ u32 max_channel_override : 1;
+ u32 mal_length_override : 1;
+ u32 hb_override : 1;
+ u32 bank_swzl_level_override : 1;
+ u32 bank_spreading_override : 1;
+ u32 reserved : 27;
+ } override_bit_info;
+ u32 max_channels;
+ u32 mal_length;
+ u32 highest_bank_bit;
+ u32 bank_swzl_level;
+ u32 bank_spreading;
+ u32 reserved[2];
+struct cvp_hfi_cmd_session_set_property_packet {
+struct cvp_hfi_client {
+ u32 transaction_id;
+ u32 data1;
+ u32 data2;
+ u64 kdata;
+ u32 reserved1;
+ u32 reserved2;
+} __packed;
+struct cvp_hfi_buf_type {
+ u32 iova;
+ u32 offset;
+ u32 fence_type;
+ u32 input_handle;
+ u32 output_handle;
+struct cvp_hfi_cmd_session_set_buffers_packet {
+ struct cvp_hfi_client client_data;
+ struct cvp_hfi_buf_type buf_type;
+struct cvp_session_release_buffers_packet {
+ u32 kernel_type;
+ u32 buffer_idx;
+struct cvp_hfi_cmd_session_hdr {
+ u32 stream_idx;
+struct cvp_hfi_msg_session_hdr {
+struct cvp_hfi_dumpmsg_session_hdr {
+ u32 dump_offset;
+ u32 dump_size;
+#define HFI_MAX_HW_ACTIVATIONS_PER_FRAME (6)
+enum hfi_hw_thread {
+ HFI_HW_FDU,
+ HFI_HW_MPU,
+ HFI_HW_OD,
+ HFI_HW_ICA,
+ HFI_HW_VADL,
+ HFI_HW_TOF,
+ HFI_HW_RGE,
+ HFI_HW_XRA,
+ HFI_HW_LSR,
+ HFI_MAX_HW_THREADS
+struct cvp_hfi_msg_session_hdr_ext {
+ u32 busy_cycles;
+ u32 total_cycles;
+ u32 hw_cycles[HFI_MAX_HW_THREADS][HFI_MAX_HW_ACTIVATIONS_PER_FRAME];
+ u32 fw_cycles[HFI_MAX_HW_ACTIVATIONS_PER_FRAME];
+struct cvp_hfi_buffer_mapping_type {
+ u32 index;
+ u32 device_addr;
+struct cvp_hfi_cmd_session_sync_process_packet {
+ u32 sync_id;
+ u32 rg_data[1];
+struct cvp_hfi_msg_event_notify_packet {
+ u32 event_id;
+ u32 event_data1;
+ u32 event_data2;
+ u32 rg_ext_event_data[1];
+struct cvp_hfi_msg_session_op_cfg_packet {
+ u32 op_conf_id;
+struct cvp_hfi_msg_sys_init_done_packet {
+struct cvp_hfi_msg_sys_pc_prep_done_packet {
+struct cvp_hfi_msg_sys_release_resource_done_packet {
+struct cvp_hfi_msg_sys_session_init_done_packet {
+struct cvp_hfi_msg_sys_session_end_done_packet {
+struct cvp_hfi_msg_session_get_sequence_header_done_packet {
+ u32 header_len;
+ u32 sequence_header;
+struct cvp_hfi_msg_sys_debug_packet {
+ u32 msg_type;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 rg_msg_data[1];
+struct cvp_hfi_packet_header {
+struct cvp_hfi_sfr_struct {
+ u32 bufSize;
+ u8 rg_data[1];
+struct cvp_hfi_cmd_sys_test_ssr_packet {
+ u32 trigger_type;
+struct cvp_hfi_msg_sys_session_ctrl_done_packet {
@@ -0,0 +1,311 @@
+#ifndef __CVP_HFI_IO_H__
+#define __CVP_HFI_IO_H__
+#define CVP_TOP_BASE_OFFS 0x00000000
+#define CVP_SS_IDLE_STATUS (CVP_TOP_BASE_OFFS + 0x50)
+#define CVP_CPU_BASE_OFFS 0x000A0000
+#define CVP_AON_BASE_OFFS 0x000E0000
+#define CVP_CPU_CS_A2HSOFTINTEN (CVP_CPU_BASE_OFFS + 0x10)
+#define CVP_CPU_CS_A2HSOFTINTENCLR (CVP_CPU_BASE_OFFS + 0x14)
+#define CVP_CPU_CS_A2HSOFTINT (CVP_CPU_BASE_OFFS + 0x18)
+#define CVP_CPU_CS_A2HSOFTINTCLR (CVP_CPU_BASE_OFFS + 0x1C)
+#define CVP_CPU_CS_VMIMSG (CVP_CPU_BASE_OFFS + 0x34)
+#define CVP_CPU_CS_VMIMSGAG0 (CVP_CPU_BASE_OFFS + 0x38)
+#define CVP_CPU_CS_VMIMSGAG1 (CVP_CPU_BASE_OFFS + 0x3C)
+#define CVP_CPU_CS_VMIMSGAG2 (CVP_CPU_BASE_OFFS + 0x40)
+#define CVP_CPU_CS_VMIMSGAG3 (CVP_CPU_BASE_OFFS + 0x44)
+#define CVP_CPU_CS_SCIACMD (CVP_CPU_BASE_OFFS + 0x48)
+#define CVP_CPU_CS_AXI4_QOS (CVP_CPU_BASE_OFFS + 0x13C)
+#define CVP_CPU_CS_H2XSOFTINTEN (CVP_CPU_BASE_OFFS + 0x148)
+/* CVP_CTRL_STATUS */
+#define CVP_CPU_CS_SCIACMDARG0 (CVP_CPU_BASE_OFFS + 0x4C)
+#define CVP_CPU_CS_SCIACMDARG0_BMSK 0xff
+#define CVP_CPU_CS_SCIACMDARG0_SHFT 0x0
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK 0xfe
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_SHFT 0x1
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK 0x1
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_SHFT 0x0
+#define CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY 0x100
+/* HFI_QTBL_INFO */
+#define CVP_CPU_CS_SCIACMDARG1 (CVP_CPU_BASE_OFFS + 0x50)
+/* HFI_QTBL_ADDR */
+#define CVP_CPU_CS_SCIACMDARG2 (CVP_CPU_BASE_OFFS + 0x54)
+/* HFI_VERSION_INFO */
+#define CVP_CPU_CS_SCIACMDARG3 (CVP_CPU_BASE_OFFS + 0x58)
+/* CVP_SFR_ADDR */
+#define CVP_CPU_CS_SCIBCMD (CVP_CPU_BASE_OFFS + 0x5C)
+/* CVP_MMAP_ADDR */
+#define CVP_CPU_CS_SCIBCMDARG0 (CVP_CPU_BASE_OFFS + 0x60)
+/* CVP_UC_REGION_ADDR */
+#define CVP_CPU_CS_SCIBARG1 (CVP_CPU_BASE_OFFS + 0x64)
+#define CVP_CPU_CS_SCIBARG2 (CVP_CPU_BASE_OFFS + 0x68)
+#define CVP_CPU_CS_SCIBARG3 (CVP_CPU_BASE_OFFS + 0x6C)
+#define CVP_CPU_CS_H2ASOFTINTEN (CVP_CPU_BASE_OFFS + 0x148)
+#define CVP_CPU_CS_H2ASOFTINTENCLR (CVP_CPU_BASE_OFFS + 0x14c)
+#define CVP_CPU_CS_H2ASOFTINT (CVP_CPU_BASE_OFFS + 0x150)
+#define CVP_CPU_CS_H2ASOFTINTCLR (CVP_CPU_BASE_OFFS + 0x154)
+#define CVP_AHB_BRIDGE_SYNC_RESET (CVP_CPU_BASE_OFFS + 0x160)
+/* FAL10 Feature Control */
+#define CVP_CPU_CS_X2RPMh (CVP_CPU_BASE_OFFS + 0x168)
+#define CVP_CPU_CS_X2RPMh_MASK0_BMSK 0x1
+#define CVP_CPU_CS_X2RPMh_MASK0_SHFT 0x0
+#define CVP_CPU_CS_X2RPMh_MASK1_BMSK 0x2
+#define CVP_CPU_CS_X2RPMh_MASK1_SHFT 0x1
+#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_BMSK 0x4
+#define CVP_CPU_CS_X2RPMh_SWOVERRIDE_SHFT 0x3
+#define CVP_CPU_CS_X2RPMh_STATUS (CVP_CPU_BASE_OFFS + 0x170)
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: cvp_wrapper
+ * --------------------------------------------------------------------------
+ */
+#define CVP_WRAPPER_BASE_OFFS 0x000B0000
+#define CVP_WRAPPER_HW_VERSION (CVP_WRAPPER_BASE_OFFS + 0x00)
+#define CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_MASK 0x78000000
+#define CVP_WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28
+#define CVP_WRAPPER_HW_VERSION_MINOR_VERSION_MASK 0xFFF0000
+#define CVP_WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16
+#define CVP_WRAPPER_HW_VERSION_STEP_VERSION_MASK 0xFFFF
+#define CVP_WRAPPER_INTR_STATUS (CVP_WRAPPER_BASE_OFFS + 0x0C)
+#define CVP_WRAPPER_INTR_STATUS_A2HWD_BMSK 0x8
+#define CVP_WRAPPER_INTR_STATUS_A2H_BMSK 0x4
+#define CVP_SS_IRQ_MASK (CVP_TOP_BASE_OFFS + 0x04)
+#define CVP_SS_INTR_BMASK (0x100)
+#define CVP_WRAPPER_INTR_MASK (CVP_WRAPPER_BASE_OFFS + 0x10)
+#define CVP_FATAL_INTR_BMSK (CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK | \
+ CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK )
+#define CVP_WRAPPER_INTR_MASK_CPU_NOC_BMSK 0x40
+#define CVP_WRAPPER_INTR_MASK_CORE_NOC_BMSK 0x20
+#define CVP_WRAPPER_INTR_MASK_A2HWD_BMSK 0x8
+#define CVP_WRAPPER_INTR_MASK_A2HCPU_BMSK 0x4
+#define CVP_WRAPPER_INTR_MASK_A2HCPU_SHFT 0x2
+#define CVP_WRAPPER_INTR_CLEAR (CVP_WRAPPER_BASE_OFFS + 0x14)
+#define CVP_WRAPPER_TZ_BASE_OFFS 0x000C0000
+#define CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG (CVP_WRAPPER_TZ_BASE_OFFS)
+#define CVP_WRAPPER_INTR_CLEAR_A2HWD_BMSK 0x10
+#define CVP_WRAPPER_INTR_CLEAR_A2HWD_SHFT 0x4
+#define CVP_WRAPPER_INTR_CLEAR_A2H_BMSK 0x4
+#define CVP_WRAPPER_INTR_CLEAR_A2H_SHFT 0x2
+#define CVP_WRAPPER_CPU_STATUS (CVP_WRAPPER_TZ_BASE_OFFS + 0x10)
+#define CVP_WRAPPER_AXI_CLOCK_CONFIG (CVP_WRAPPER_TZ_BASE_OFFS + 0x14)
+#define CVP_WRAPPER_QNS4PDXFIFO_RESET (CVP_WRAPPER_TZ_BASE_OFFS + 0x18)
+#define CVP_WRAPPER_CPU_CGC_DIS (CVP_WRAPPER_BASE_OFFS + 0x2010)
+#define CVP_WRAPPER_CPU_CLOCK_CONFIG (CVP_WRAPPER_BASE_OFFS + 0x50)
+#define CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL (CVP_WRAPPER_BASE_OFFS + 0x54)
+#define CVP_WRAPPER_DEBUG_BRIDGE_LPI_STATUS (CVP_WRAPPER_BASE_OFFS + 0x58)
+#define CVP_WRAPPER_CPU_NOC_LPI_CONTROL (CVP_WRAPPER_BASE_OFFS + 0x5C)
+#define CVP_WRAPPER_CPU_NOC_LPI_STATUS (CVP_WRAPPER_BASE_OFFS + 0x60)
+#define CVP_WRAPPER_CORE_CLOCK_CONFIG (CVP_WRAPPER_BASE_OFFS + 0x88)
+#define CVP_CTRL_INIT CVP_CPU_CS_SCIACMD
+#define CVP_CTRL_STATUS CVP_CPU_CS_SCIACMDARG0
+#define CVP_CTRL_INIT_STATUS__M \
+ CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_STATUS_BMSK
+#define CVP_CTRL_ERROR_STATUS__M \
+ CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_ERROR_STATUS_BMSK
+#define CVP_CTRL_INIT_IDLE_MSG_BMSK \
+ CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_INIT_IDLE_MSG_BMSK
+#define CVP_CTRL_STATUS_PC_READY \
+ CVP_CPU_CS_SCIACMDARG0_HFI_CTRL_PC_READY
+#define CVP_QTBL_INFO CVP_CPU_CS_SCIACMDARG1
+#define CVP_QTBL_ADDR CVP_CPU_CS_SCIACMDARG2
+#define CVP_VERSION_INFO CVP_CPU_CS_SCIACMDARG3
+#define CVP_SFR_ADDR CVP_CPU_CS_SCIBCMD
+#define CVP_MMAP_ADDR CVP_CPU_CS_SCIBCMDARG0
+#define CVP_UC_REGION_ADDR CVP_CPU_CS_SCIBARG1
+#define CVP_UC_REGION_SIZE CVP_CPU_CS_SCIBARG2
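+/*
+ * Editorial sketch, not part of this patch: firmware boot and error state are
+ * reported through CVP_CTRL_STATUS. Using __read_register() (seen earlier in
+ * this patch) with the aliases above, a status check might look roughly like:
+ *
+ * u32 ctrl = __read_register(device, CVP_CTRL_STATUS);
+ * bool fw_init_done = !!(ctrl & CVP_CTRL_INIT_STATUS__M);
+ * bool fw_error = !!(ctrl & CVP_CTRL_ERROR_STATUS__M);
+ *
+ * 'device' here is a hypothetical iris_hfi_device pointer.
+ */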
+/* HFI_DSP_QTBL_ADDR
+ * 31:3 - HFI_DSP_QTBL_ADDR
+ * 4-byte aligned Address
+ */
+#define HFI_DSP_QTBL_ADDR CVP_CPU_CS_VMIMSG
+/* HFI_DSP_UC_REGION_ADDR
+ * 31:20 - HFI_DSP_UC_REGION_ADDR
+ * 1MB aligned address.
+ * Uncached Region start Address. This region covers
+ * HFI DSP QTable,
+ * HFI DSP Queue Headers,
+ * HFI DSP Queues,
+ */
+#define HFI_DSP_UC_REGION_ADDR CVP_CPU_CS_VMIMSGAG0
+/* HFI_DSP_UC_REGION_SIZE
+ * 31:20 - HFI_DSP_UC_REGION_SIZE
+ * Multiples of 1MB.
+ * Size of the DSP_UC_REGION Uncached Region
+ */
+#define HFI_DSP_UC_REGION_SIZE CVP_CPU_CS_VMIMSGAG1
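+/*
+ * Editorial sketch, not part of this patch: per the comments above, the DSP
+ * uncached region base must be 1 MB aligned and its size a multiple of 1 MB.
+ * A hypothetical sanity check (SZ_1M comes from <linux/sizes.h>) could be:
+ *
+ * bool dsp_uc_ok = !(uc_addr & (SZ_1M - 1)) && uc_size &&
+ *                  !(uc_size & (SZ_1M - 1));
+ */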
+/*
+ * MODULE: vcodec noc error log registers
+ */
+#define CVP_NOC_BASE_OFFS 0x000D0000
+#define CVP_NOC_ERR_SWID_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x0)
+#define CVP_NOC_ERR_SWID_HIGH_OFFS (CVP_NOC_BASE_OFFS + 0x4)
+#define CVP_NOC_ERR_MAINCTL_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x8)
+#define CVP_NOC_ERR_ERRVLD_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x10)
+#define CVP_NOC_ERR_ERRCLR_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x18)
+#define CVP_NOC_ERR_ERRLOG0_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x20)
+#define CVP_NOC_ERR_ERRLOG0_HIGH_OFFS (CVP_NOC_BASE_OFFS + 0x24)
+#define CVP_NOC_ERR_ERRLOG1_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x28)
+#define CVP_NOC_ERR_ERRLOG1_HIGH_OFFS (CVP_NOC_BASE_OFFS + 0x2C)
+#define CVP_NOC_ERR_ERRLOG2_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x30)
+#define CVP_NOC_ERR_ERRLOG2_HIGH_OFFS (CVP_NOC_BASE_OFFS + 0x34)
+#define CVP_NOC_ERR_ERRLOG3_LOW_OFFS (CVP_NOC_BASE_OFFS + 0x38)
+#define CVP_NOC_ERR_ERRLOG3_HIGH_OFFS (CVP_NOC_BASE_OFFS + 0x3C)
+#define CVP_NOC_SBM_FAULTINEN0_LOW (CVP_NOC_BASE_OFFS + 0x240)
+#define CVP_NOC_SBM_FAULTINSTATUS0_LOW (CVP_NOC_BASE_OFFS + 0x248)
+#define CVP_NOC_SBM_SENSELN0_LOW (CVP_NOC_BASE_OFFS + 0x300)
+#define CVP_NOC_CORE_BASE_OFFS 0x00010000
+#define CVP_NOC_RGE_NIU_DECCTL_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3108)
+#define CVP_NOC_RGE_NIU_ENCCTL_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3188)
+#define CVP_NOC_GCE_VADL_TOF_NIU_DECCTL_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3508)
+#define CVP_NOC_GCE_VADL_TOF_NIU_ENCCTL_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3588)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0240)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0300)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN0_HIGH \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0304)
+#define CVP_NOC_MAIN_SIDEBANDMANAGER_SENSELN1_HIGH \
+ (CVP_NOC_CORE_BASE_OFFS + 0x030C)
+#define CVP_NOC_CORE_ERR_SWID_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0000)
+#define CVP_NOC_CORE_ERR_SWID_HIGH_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0004)
+#define CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0008)
+#define CVP_NOC_CORE_ERR_ERRVLD_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0010)
+#define CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0018)
+#define CVP_NOC_CORE_ERR_ERRLOG0_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0020)
+#define CVP_NOC_CORE_ERR_ERRLOG0_HIGH_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0024)
+#define CVP_NOC_CORE_ERR_ERRLOG1_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0028)
+#define CVP_NOC_CORE_ERR_ERRLOG1_HIGH_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x002C)
+#define CVP_NOC_CORE_ERR_ERRLOG2_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0030)
+#define CVP_NOC_CORE_ERR_ERRLOG2_HIGH_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0034)
+#define CVP_NOC_CORE_ERR_ERRLOG3_LOW_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x0038)
+#define CVP_NOC_CORE_ERR_ERRLOG3_HIGH_OFFS \
+ (CVP_NOC_CORE_BASE_OFFS + 0x003C)
+#define CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x2018)
+/* NoC QoS registers */
+#define CVP_NOC_RGE_PRIORITYLUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3030)
+#define CVP_NOC_RGE_PRIORITYLUT_HIGH \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3034)
+#define CVP_NOC_RGE_URGENCY_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3038)
+#define CVP_NOC_RGE_DANGERLUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3040)
+#define CVP_NOC_RGE_SAFELUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3048)
+#define CVP_NOC_GCE_PRIORITYLUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3430)
+#define CVP_NOC_GCE_PRIORITYLUT_HIGH \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3434)
+#define CVP_NOC_GCE_URGENCY_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3438)
+#define CVP_NOC_GCE_DANGERLUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3440)
+#define CVP_NOC_GCE_SAFELUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3448)
+#define CVP_NOC_CDM_PRIORITYLUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3830)
+#define CVP_NOC_CDM_PRIORITYLUT_HIGH \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3834)
+#define CVP_NOC_CDM_URGENCY_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3838)
+#define CVP_NOC_CDM_DANGERLUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3840)
+#define CVP_NOC_CDM_SAFELUT_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0x3848)
+/* End of NoC Qos */
+#define CVP_NOC_RCGCONTROLLER_MAINCTL_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0xC008)
+#define CVP_NOC_RCGCONTROLLER_HYSTERESIS_LOW \
+ (CVP_NOC_CORE_BASE_OFFS + 0xC010)
+#define CVP_NOC_RESET_REQ \
+ (CVP_NOC_CORE_BASE_OFFS + 0xf000)
+#define CVP_NOC_RESET_ACK \
+ (CVP_NOC_CORE_BASE_OFFS + 0xf004)
+#define CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL (CVP_AON_BASE_OFFS + 0x8)
+#define CVP_AON_WRAPPER_CVP_NOC_LPI_STATUS (CVP_AON_BASE_OFFS + 0xC)
+#define CVP_AON_WRAPPER_CVP_NOC_ARCG_CONTROL (CVP_AON_BASE_OFFS + 0x14)
+#define CVP_AON_WRAPPER_CVP_NOC_CORE_CLK_CONTROL (CVP_AON_BASE_OFFS + 0x24)
+#define CVP_AON_WRAPPER_CVP_NOC_CORE_SW_RESET (CVP_AON_BASE_OFFS + 0x1C)
+#define CVP_AON_WRAPPER_SPARE (CVP_AON_BASE_OFFS + 0x28)
+#define CVP_CC_BASE_OFFS 0xF8000
+#define CVP_CC_MVS1C_GDSCR (CVP_CC_BASE_OFFS + 0x78)
+#define CVP_CC_MVS1C_CBCR (CVP_CC_BASE_OFFS + 0x90)
+#define CVP_CC_MVS1_GDSCR (CVP_CC_BASE_OFFS + 0xCC)
+#define CVP_CC_MVS1_CBCR (CVP_CC_BASE_OFFS + 0xE0)
+#define CVP_CC_AHB_CBCR (CVP_CC_BASE_OFFS + 0xF4)
+#define CVP_CC_XO_CBCR (CVP_CC_BASE_OFFS + 0x124)
+#define CVP_CC_SLEEP_CBCR (CVP_CC_BASE_OFFS + 0x150)
+#define CVP_GCC_VIDEO_AXI1_CBCR (0x22024)
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "cvp_power.h"
+static inline int find_max(unsigned long *array, unsigned int num)
+ int i, max = 0;
+ for (i = 0; i < num; i++)
+ max = array[i] > max ? array[i] : max;
+ return max;
+static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
+ return (inst->prop.cycles[HFI_HW_OD] ||
+ inst->prop.cycles[HFI_HW_MPU] ||
+ inst->prop.cycles[HFI_HW_FDU] ||
+ inst->prop.cycles[HFI_HW_ICA] ||
+ inst->prop.cycles[HFI_HW_VADL] ||
+ inst->prop.cycles[HFI_HW_TOF] ||
+ inst->prop.cycles[HFI_HW_RGE] ||
+ inst->prop.cycles[HFI_HW_XRA] ||
+ inst->prop.cycles[HFI_HW_LSR]);
+static char hw_names[HFI_MAX_HW_THREADS][8] = {{"FDU"}, {"MPU"}, {"OD"}, {"ICA"},
+ {"VADL"}, {"TOF"}, {"RGE"}, {"XRA"},
+ {"LSR"}};
+static void aggregate_power_update(struct msm_cvp_core *core,
+ struct cvp_power_level *nrt_pwr,
+ struct cvp_power_level *rt_pwr,
+ unsigned int max_clk_rate)
+ int i, j;
+ unsigned long blocks_sum[2][HFI_MAX_HW_THREADS] = {0};
+ unsigned long fw_sum[2] = {0}, max_cycle[2] = {0}, op_max_cycle[2] = {0};
+ unsigned long op_blocks_max[2][HFI_MAX_HW_THREADS] = {0};
+ unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->state == MSM_CVP_CORE_INVALID ||
+ inst->state == MSM_CVP_CORE_UNINIT ||
+ !is_subblock_profile_existed(inst))
+ if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
+ /* Non-realtime session use index 0 */
+ i = 0;
+ } else {
+ i = 1;
+ }
+ for (j = 0; j < HFI_MAX_HW_THREADS; j++)
+ if (inst->prop.cycles[j])
+ dprintk(CVP_PWR, "pwrUpdate %s %u\n",
+ hw_names[j], inst->prop.cycles[j]);
+ if (inst->prop.op_cycles[j])
+ dprintk(CVP_PWR, "pwrUpdate_OP %s %u\n",
+ hw_names[j], inst->prop.op_cycles[j]);
+ dprintk(CVP_PWR, " fw %u fw_o %u\n", inst->prop.fw_cycles,
+ inst->prop.fw_op_cycles);
+ blocks_sum[i][j] += inst->prop.cycles[j];
+ fw_sum[i] += inst->prop.fw_cycles;
+ op_blocks_max[i][j] =
+ (op_blocks_max[i][j] >= inst->prop.op_cycles[j]) ?
+ op_blocks_max[i][j] : inst->prop.op_cycles[j];
+ op_fw_max[i] =
+ (op_fw_max[i] >= inst->prop.fw_op_cycles) ?
+ op_fw_max[i] : inst->prop.fw_op_cycles;
+ bw_sum[i] += inst->prop.ddr_bw;
+ op_bw_max[i] =
+ (op_bw_max[i] >= inst->prop.ddr_op_bw) ?
+ op_bw_max[i] : inst->prop.ddr_op_bw;
+ for (j = 0; j < HFI_MAX_HW_THREADS; j++) {
+ if (inst->prop.fps[j])
+ dprintk(CVP_PWR, "fps %s %d ", hw_names[j],
+ inst->prop.fps[j]);
+ for (i = 0; i < 2; i++) {
+ max_cycle[i] = find_max(&blocks_sum[i][0], HFI_MAX_HW_THREADS);
+ op_max_cycle[i] = find_max(&op_blocks_max[i][0], HFI_MAX_HW_THREADS);
+ op_max_cycle[i] =
+ (op_max_cycle[i] > max_clk_rate) ?
+ max_clk_rate : op_max_cycle[i];
+ bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
+ bw_sum[i] : op_bw_max[i];
+ nrt_pwr->core_sum += max_cycle[0];
+ nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_max_cycle[0]) ?
+ nrt_pwr->op_core_sum : op_max_cycle[0];
+ nrt_pwr->bw_sum += bw_sum[0];
+ rt_pwr->core_sum += max_cycle[1];
+ rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_max_cycle[1]) ?
+ rt_pwr->op_core_sum : op_max_cycle[1];
+ rt_pwr->bw_sum += bw_sum[1];
+/*
+ * adjust_bw_freqs(): calculate CVP clock freq and bw required to sustain
+ * required use case.
+ * Bandwidth vote will be best-effort, not returning error if the request
+ * b/w exceeds max limit.
+ * Clock vote from non-realtime sessions will be best effort, not returning
+ * error if the aggregated session clock request exceeds max limit.
+ * Clock vote from realtime session will be hard request. If aggregated
+ * session clock request exceeds max limit, the function will return
+ * error.
+ *
+ * Ensure caller acquires clk_lock!
+ */
+static int adjust_bw_freqs(unsigned int max_bw, unsigned int min_bw)
+ struct iris_hfi_device *hdev;
+ struct allowed_clock_rates_table *tbl = NULL;
+ unsigned int tbl_size;
+ unsigned int cvp_min_rate, cvp_max_rate;
+ struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
+ unsigned long tmp, core_sum, op_core_sum, bw_sum;
+ hdev = core->dev_ops->hfi_device_data;
+ tbl = core->resources.allowed_clks_tbl;
+ tbl_size = core->resources.allowed_clks_tbl_size;
+ cvp_min_rate = tbl[0].clock_rate;
+ cvp_max_rate = tbl[tbl_size - 1].clock_rate;
+ aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
+ dprintk(CVP_PWR, "PwrUpdate nrt %u %u rt %u %u\n",
+ nrt_pwr.core_sum, nrt_pwr.op_core_sum,
+ rt_pwr.core_sum, rt_pwr.op_core_sum);
+ if (rt_pwr.core_sum > cvp_max_rate) {
+ dprintk(CVP_WARN, "%s clk vote out of range %lld\n",
+ __func__, rt_pwr.core_sum);
+ core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
+ op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
+ rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
+ core_sum = (core_sum >= op_core_sum) ?
+ core_sum : op_core_sum;
+ if (core_sum > cvp_max_rate) {
+ core_sum = cvp_max_rate;
+ } else if (core_sum <= cvp_min_rate) {
+ core_sum = cvp_min_rate;
+ for (i = 1; i < tbl_size; i++)
+ if (core_sum <= tbl[i].clock_rate) {
+ core_sum = tbl[i].clock_rate;
+ break;
+ }
+ bw_sum = rt_pwr.bw_sum + nrt_pwr.bw_sum;
+ bw_sum = bw_sum >> 10;
+ bw_sum = (bw_sum > max_bw) ? max_bw : bw_sum;
+ bw_sum = (bw_sum < min_bw) ? min_bw : bw_sum;
+ dprintk(CVP_PWR, "%s %lld %lld\n", __func__,
+ core_sum, bw_sum);
+ tmp = core->curr_freq;
+ core->curr_freq = core_sum;
+ core->orig_core_sum = tmp;
+ hdev->clk_freq = core->curr_freq;
+ core->bw_sum = bw_sum;
+int msm_cvp_update_power(struct msm_cvp_inst *inst)
+ struct msm_cvp_inst *s;
+ struct clock_set *clocks;
+ struct clock_info *cl;
+ int bus_count = 0;
+ unsigned int max_bw = 0, min_bw = 0;
+ s = cvp_get_inst_validate(inst->core, inst);
+ if (!s)
+ core = inst->core;
+ if (!core || core->state == CVP_CORE_UNINIT) {
+ goto adjust_exit;
+ clocks = &core->resources.clock_set;
+ cl = &clocks->clock_tbl[clocks->count - 1];
+ if (!cl->has_scaling) {
+ dprintk(CVP_ERR, "Cannot scale CVP clock\n");
+ for (bus_count = 0; bus_count < core->resources.bus_set.count; bus_count++) {
+ if (!strcmp(core->resources.bus_set.bus_tbl[bus_count].name, "cvp-ddr")) {
+ bus = &core->resources.bus_set.bus_tbl[bus_count];
+ max_bw = bus->range[1];
+ min_bw = max_bw/10;
+ if (!bus) {
+ dprintk(CVP_ERR, "bus node is NULL for cvp-ddr\n");
+ mutex_lock(&core->clk_lock);
+ rc = adjust_bw_freqs(max_bw, min_bw);
+ mutex_unlock(&core->clk_lock);
+ rc = msm_cvp_set_clocks(core);
+ "Failed to set clock rate %u %s: %d %s\n",
+ core->curr_freq, cl->name, rc, __func__);
+ core->curr_freq = core->orig_core_sum;
+ rc = msm_cvp_set_bw(core, bus, core->bw_sum);
+adjust_exit:
+ cvp_put_inst(s);
+unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk)
+ unsigned long cycles_sum = 0;
+ dprintk(CVP_ERR, "%s: invalid core\n", __func__);
+ switch (hwblk) {
+ case HFI_HW_FDU:
+ cycles_sum += inst->prop.cycles[HFI_HW_FDU];
+ case HFI_HW_ICA:
+ cycles_sum += inst->prop.cycles[HFI_HW_ICA];
+ case HFI_HW_MPU:
+ cycles_sum += inst->prop.cycles[HFI_HW_MPU];
+ case HFI_HW_OD:
+ cycles_sum += inst->prop.cycles[HFI_HW_OD];
+ case HFI_HW_VADL:
+ cycles_sum += inst->prop.cycles[HFI_HW_VADL];
+ case HFI_HW_TOF:
+ cycles_sum += inst->prop.cycles[HFI_HW_TOF];
+ case HFI_HW_RGE:
+ cycles_sum += inst->prop.cycles[HFI_HW_RGE];
+ case HFI_HW_XRA:
+ cycles_sum += inst->prop.cycles[HFI_HW_XRA];
+ case HFI_HW_LSR:
+ cycles_sum += inst->prop.cycles[HFI_HW_LSR];
+ dprintk(CVP_ERR, "unrecognized hw block %d\n",
+ hwblk);
+ cycles_sum = cycles_sum&0xFFFFFFFF;
+ return (unsigned int)cycles_sum;
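+/*
+ * Editorial note, not part of this patch: adjust_bw_freqs() clamps the
+ * aggregated cycle request to the allowed clock table and rounds it up to the
+ * lowest entry that can satisfy it. With hypothetical allowed rates of
+ * {240, 338, 444} MHz, a 300 MHz aggregate request is voted as 338 MHz and a
+ * 350 MHz request as 444 MHz.
+ */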
@@ -0,0 +1,23 @@
+#ifndef _CVP_POWER_H_
+#define _CVP_POWER_H_
+struct cvp_power_level {
+ unsigned long core_sum;
+ unsigned long op_core_sum;
+ unsigned long bw_sum;
+int msm_cvp_update_power(struct msm_cvp_inst *inst);
+unsigned int msm_cvp_get_hw_aggregate_cycles(enum hfi_hw_thread hwblk);
+#ifndef _MSM_V4L2_PRIVATE_H_
+#define _MSM_V4L2_PRIVATE_H_
+#include <media/msm_eva_private.h>
+long cvp_unblocked_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+long cvp_compat_ioctl(struct file *filp,
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-direction.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/mem-buf.h>
+#include <linux/qcom-dma-mapping.h>
+static void * __cvp_dma_buf_vmap(struct dma_buf *dbuf)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
+ struct dma_buf_map map;
+#else
+ struct iosys_map map;
+#endif
+ void *dma_map;
+ int err;
+ err = dma_buf_vmap(dbuf, &map);
+ dma_map = err ? NULL : map.vaddr;
+ if (!dma_map)
+ dprintk(CVP_ERR, "map to kvaddr failed\n");
+ return dma_map;
+static void __cvp_dma_buf_vunmap(struct dma_buf *dbuf, void *vaddr)
+ struct dma_buf_map map = { \
+ .vaddr = vaddr, \
+ .is_iomem = false, \
+ struct iosys_map map = { \
+ if (vaddr)
+ dma_buf_vunmap(dbuf, &map);
+static int msm_dma_get_device_address(struct dma_buf *dbuf, u32 align,
+ dma_addr_t *iova, u32 flags, struct msm_cvp_platform_resources *res,
+ struct cvp_dma_mapping_info *mapping_info)
+ struct dma_buf_attachment *attach;
+ struct sg_table *table = NULL;
+ struct context_bank_info *cb = NULL;
+ if (!dbuf || !iova || !mapping_info) {
+ dprintk(CVP_ERR, "Invalid params: %pK, %pK, %pK\n",
+ dbuf, iova, mapping_info);
+ if (is_iommu_present(res)) {
+ cb = msm_cvp_smem_get_context_bank(res, flags);
+ "%s: Failed to get context bank device\n",
+ rc = -EIO;
+ goto mem_map_failed;
+ /* Prepare a dma buf for dma on the given device */
+ attach = dma_buf_attach(dbuf, cb->dev);
+ if (IS_ERR_OR_NULL(attach)) {
+ rc = PTR_ERR(attach) ?: -ENOMEM;
+ dprintk(CVP_ERR, "Failed to attach dmabuf\n");
+ goto mem_buf_attach_failed;
+ dprintk(CVP_MEM, "%s: CB dev: %s, attach dev: %s, attach: 0x%lx, dbuf: 0x%lx",
+ __func__, dev_name(cb->dev), dev_name(attach->dev), attach, dbuf);
+ /*
+ * Get the scatterlist for the given attachment
+ * Mapping of sg is taken care by map attachment
+ * We do not need dma_map function to perform cache operations
+ * on the whole buffer size and hence pass skip sync flag.
+ * We do the required cache operations separately for the
+ * required buffer size
+ */
+ attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+ if (flags & SMEM_CAMERA)
+ attach->dma_map_attrs |= DMA_ATTR_QTI_SMMU_PROXY_MAP;
+ if (res->sys_cache_present)
+ attach->dma_map_attrs |=
+ DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+ table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(table)) {
+ dprintk(CVP_ERR, "Failed to map table %d\n", PTR_ERR(table));
+ "Mapping detail dma_buf 0x%llx, %s, size %#x\n",
+ dbuf, dbuf->name, dbuf->size);
+ rc = PTR_ERR(table) ?: -ENOMEM;
+ goto mem_map_table_failed;
+ if (table->sgl) {
+ *iova = table->sgl->dma_address;
+ dprintk(CVP_ERR, "sgl is NULL\n");
+ goto mem_map_sg_failed;
+ mapping_info->dev = cb->dev;
+ mapping_info->domain = cb->domain;
+ mapping_info->table = table;
+ mapping_info->attach = attach;
+ mapping_info->buf = dbuf;
+ mapping_info->cb_info = (void *)cb;
+ dprintk(CVP_MEM, "%s: sg-table: 0x%lx, dbuf: 0x%lx, table->sgl->dma_address: 0x%lx",
+ __func__, table, dbuf, table->sgl->dma_address);
+ dprintk(CVP_MEM, "iommu not present, use phys mem addr\n");
+mem_map_sg_failed:
+ dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
+mem_map_table_failed:
+ dma_buf_detach(dbuf, attach);
+mem_buf_attach_failed:
+mem_map_failed:
+static int msm_dma_put_device_address(u32 flags,
+ struct dma_buf_attachment *attach = NULL;
+ struct dma_buf *dbuf = NULL;
+ if (!mapping_info) {
+ dprintk(CVP_WARN, "Invalid mapping_info\n");
+ if (!mapping_info->dev || !mapping_info->table ||
+ !mapping_info->buf || !mapping_info->attach ||
+ !mapping_info->cb_info) {
+ dprintk(CVP_WARN, "Invalid params\n");
+ attach = mapping_info->attach;
+ table = mapping_info->table;
+ cb = (struct context_bank_info *) mapping_info->cb_info;
+ dbuf = mapping_info->buf;
+ dprintk(CVP_MEM, "%s: CB dev_name: %s, attach dev_name: %s, attach: 0x%lx, dbuf: 0x%lx",
+ dprintk(CVP_MEM, "%s: sg-table: 0x%lx, table->sgl->dma_address: 0x%lx",
+ dma_buf_unmap_attachment(mapping_info->attach,
+ mapping_info->table, DMA_BIDIRECTIONAL);
+ dma_buf_detach(mapping_info->buf, mapping_info->attach);
+ mapping_info->dev = NULL;
+ mapping_info->domain = NULL;
+ mapping_info->table = NULL;
+ mapping_info->attach = NULL;
+ mapping_info->buf = NULL;
+ mapping_info->cb_info = NULL;
+struct dma_buf *msm_cvp_smem_get_dma_buf(int fd)
+ struct dma_buf *dma_buf;
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dma_buf)) {
+ dprintk(CVP_ERR, "Failed to get dma_buf for %d, error %ld\n",
+ fd, PTR_ERR(dma_buf));
+ dma_buf = NULL;
+ return dma_buf;
+void msm_cvp_smem_put_dma_buf(void *dma_buf)
+ if (!dma_buf) {
+ dprintk(CVP_ERR, "%s: NULL dma_buf\n", __func__);
+ dma_heap_buffer_free((struct dma_buf *)dma_buf);
+int msm_cvp_map_smem(struct msm_cvp_inst *inst,
+ struct msm_cvp_smem *smem,
+ const char *str)
+ int *vmid_list;
+ int *perms_list;
+ int nelems = 0;
+ dma_addr_t iova = 0;
+ u32 temp = 0, checksum = 0;
+ u32 align = SZ_4K;
+ bool is_config_pkt = false;
+ if (!inst || !smem) {
+ dprintk(CVP_ERR, "%s: Invalid params: %pK %pK\n",
+ __func__, inst, smem);
+ dma_buf = smem->dma_buf;
+ rc = mem_buf_dma_buf_copy_vmperm(dma_buf,
+ &vmid_list, &perms_list, &nelems);
+ dprintk(CVP_ERR, "%s fail to get vmid and perms %d\n",
+ for (temp = 0; temp < nelems; temp++) {
+ if (vmid_list[temp] == VMID_CP_PIXEL)
+ smem->flags |= (SMEM_SECURE | SMEM_PIXEL);
+ else if (vmid_list[temp] == VMID_CP_NON_PIXEL)
+ smem->flags |= (SMEM_SECURE | SMEM_NON_PIXEL);
+ else if (vmid_list[temp] == VMID_CP_CAMERA ||
+ /* To-do: what if the EVA driver runs in TVM */
+ vmid_list[temp] == VMID_TVM)
+ smem->flags |= (SMEM_SECURE | SMEM_CAMERA);
+ dprintk(CVP_MEM, "inst %pK VM idx %d VM_ID %d fd %d pkt_type %#x\n",
+ inst, temp, vmid_list[temp], smem->fd, smem->pkt_type);
+ rc = msm_dma_get_device_address(dma_buf, align, &iova, smem->flags,
+ &(inst->core->resources), &smem->mapping_info);
+ dprintk(CVP_ERR, "Failed to get device address: %d\n", rc);
+ temp = (u32)iova;
+ if ((dma_addr_t)temp != iova) {
+ dprintk(CVP_ERR, "iova(%pa) truncated to %#x", &iova, temp);
+ smem->size = dma_buf->size;
+ smem->device_addr = (u32)iova;
+ i = get_pkt_index_from_type(smem->pkt_type);
+ if (i > 0 && smem->pkt_type != HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS
+ && smem->pkt_type != HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS
+ && smem->pkt_type != HFI_CMD_SESSION_EVA_DLFL_CONFIG)
+ /* User persist buffer has no feature config info */
+ is_config_pkt = cvp_hfi_defs[i].is_config_pkt;
+ if (i > 0 && cvp_hfi_defs[i].checksum_enabled) {
+ dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
+ smem->kvaddr = __cvp_dma_buf_vmap(dma_buf);
+ if (!smem->kvaddr) {
+ dprintk(CVP_WARN, "%s Fail map into kernel\n",
+ dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
+ for (i = 0; i < 256; i++)
+ checksum += *(u32 *)(smem->kvaddr + i*sizeof(u32));
+ dprintk(CVP_MEM, "Map checksum %#x fd=%d\n",
+ checksum, smem->fd);
+ print_smem(CVP_MEM, str, inst, smem);
+ atomic_inc(&inst->smem_count);
+ goto success;
+ smem->device_addr = 0x0;
+success:
+ kfree(vmid_list);
+ kfree(perms_list);
+int msm_cvp_unmap_smem(struct msm_cvp_inst *inst,
+ u32 checksum = 0;
+ if (!smem) {
+ dprintk(CVP_ERR, "%s: Invalid params: %pK\n", __func__, smem);
+ dprintk(CVP_WARN, "%s DS buf Fail map into kernel\n",
+ dprintk(CVP_MEM, "Unmap checksum %#x fd=%d\n",
+ __cvp_dma_buf_vunmap(dma_buf, smem->kvaddr);
+ smem->kvaddr = 0;
+ rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info);
+ dprintk(CVP_ERR, "Failed to put device address: %d\n", rc);
+ atomic_dec(&inst->smem_count);
+static int alloc_dma_mem(size_t size, u32 align, int map_kernel,
+ struct msm_cvp_platform_resources *res, struct msm_cvp_smem *mem)
+ struct dma_heap *heap = NULL;
+ struct mem_buf_lend_kernel_arg arg;
+ int vmids[1];
+ int perms[1];
+ if (!res) {
+ dprintk(CVP_ERR, "%s: NULL res\n", __func__);
+ align = ALIGN(align, SZ_4K);
+ size = ALIGN(size, SZ_4K);
+ heap = dma_heap_find("qcom,system");
+ dprintk(CVP_MEM, "%s size %zx align %d flag %d\n",
+ __func__, size, align, mem->flags);
+ "No IOMMU CB: allocate shared memory heap size %zx align %d\n",
+ size, align);
+ dbuf = dma_heap_buffer_alloc(heap, size, 0, 0);
+ if (IS_ERR_OR_NULL(dbuf)) {
+ "Failed to allocate shared memory = %x bytes, %x %x\n",
+ size, mem->flags, PTR_ERR(dbuf));
+ goto fail_shared_mem_alloc;
+ perms[0] = PERM_READ | PERM_WRITE;
+ arg.nr_acl_entries = 1;
+ arg.vmids = vmids;
+ arg.perms = perms;
+ if (mem->flags & SMEM_NON_PIXEL) {
+ vmids[0] = VMID_CP_NON_PIXEL;
+ rc = mem_buf_lend(dbuf, &arg);
+ } else if (mem->flags & SMEM_PIXEL) {
+ vmids[0] = VMID_CP_PIXEL;
+ dprintk(CVP_ERR, "Failed to lend dmabuf %d, vmid %d\n",
+ rc, vmids[0]);
+ goto fail_device_address;
+ if (!gfa_cv.dmabuf_f_op)
+ gfa_cv.dmabuf_f_op = (const struct file_operations *)dbuf->file->f_op;
+ mem->size = size;
+ mem->dma_buf = dbuf;
+ mem->kvaddr = NULL;
+ rc = msm_dma_get_device_address(dbuf, align, &iova, mem->flags,
+ res, &mem->mapping_info);
+ dprintk(CVP_ERR, "Failed to get device address: %d\n",
+ mem->device_addr = (u32)iova;
+ if ((dma_addr_t)mem->device_addr != iova) {
+ dprintk(CVP_ERR, "iova(%pa) truncated to %#x",
+ &iova, mem->device_addr);
+ if (map_kernel) {
+ dma_buf_begin_cpu_access(dbuf, DMA_BIDIRECTIONAL);
+ mem->kvaddr = __cvp_dma_buf_vmap(dbuf);
+ if (!mem->kvaddr) {
+ "Failed to map shared mem in kernel\n");
+ goto fail_map;
+ dprintk(CVP_MEM,
+ "%s: dma_buf=%pK,iova=%x,size=%d,kvaddr=%pK,flags=%#lx\n",
+ __func__, mem->dma_buf, mem->device_addr, mem->size,
+ mem->kvaddr, mem->flags);
+fail_map:
+ if (map_kernel)
+ dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL);
+fail_device_address:
+ dma_heap_buffer_free(dbuf);
+fail_shared_mem_alloc:
+static int free_dma_mem(struct msm_cvp_smem *mem)
+ "%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK\n",
+ __func__, mem->dma_buf, mem->device_addr, mem->size, mem->kvaddr);
+ if (mem->device_addr) {
+ msm_dma_put_device_address(mem->flags, &mem->mapping_info);
+ mem->device_addr = 0x0;
+ if (mem->kvaddr) {
+ __cvp_dma_buf_vunmap(mem->dma_buf, mem->kvaddr);
+ dma_buf_end_cpu_access(mem->dma_buf, DMA_BIDIRECTIONAL);
+ if (mem->dma_buf) {
+ dma_heap_buffer_free(mem->dma_buf);
+ mem->dma_buf = NULL;
+int msm_cvp_smem_alloc(size_t size, u32 align, int map_kernel,
+ void *res, struct msm_cvp_smem *smem)
+ if (!smem || !size) {
+ dprintk(CVP_ERR, "%s: NULL smem or %d size\n",
+ __func__, (u32)size);
+ rc = alloc_dma_mem(size, align, map_kernel,
+ (struct msm_cvp_platform_resources *)res, smem);
+int msm_cvp_smem_free(struct msm_cvp_smem *smem)
+ dprintk(CVP_ERR, "NULL smem passed\n");
+ rc = free_dma_mem(smem);
+int msm_cvp_smem_cache_operations(struct dma_buf *dbuf,
+ enum smem_cache_ops cache_op, unsigned long offset, unsigned long size)
+ if (!dbuf) {
+ switch (cache_op) {
+ case SMEM_CACHE_CLEAN:
+ case SMEM_CACHE_CLEAN_INVALIDATE:
+ rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
+ offset, size);
+ rc = dma_buf_end_cpu_access_partial(dbuf, DMA_BIDIRECTIONAL,
+ case SMEM_CACHE_INVALIDATE:
+ rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE,
+ rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE,
+ dprintk(CVP_ERR, "%s: cache (%d) operation not supported\n",
+ __func__, cache_op);
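+ /*
+ * Usage sketch (illustrative): flushing CPU writes before the device reads
+ * part of a shared buffer. The offset/size pair restricts the cache
+ * maintenance to the touched range rather than the whole dma-buf:
+ *
+ *	rc = msm_cvp_smem_cache_operations(dbuf, SMEM_CACHE_CLEAN,
+ *			offset, len);
+ *	if (rc)
+ *		dprintk(CVP_ERR, "cache clean failed %d\n", rc);
+ */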
+struct context_bank_info *msm_cvp_smem_get_context_bank(
+ unsigned int flags)
+ struct context_bank_info *cb = NULL, *match = NULL;
+ char *search_str;
+ char *non_secure_cb = "cvp_hlos";
+ char *secure_nonpixel_cb = "cvp_sec_nonpixel";
+ char *secure_pixel_cb = "cvp_sec_pixel";
+ char *camera_cb = "cvp_camera";
+ char *dsp_cb = "cvp_dsp";
+ bool is_secure = (flags & SMEM_SECURE) ? true : false;
+ if (flags & SMEM_PIXEL)
+ search_str = secure_pixel_cb;
+ else if (flags & SMEM_NON_PIXEL)
+ search_str = secure_nonpixel_cb;
+ else if (flags & SMEM_CAMERA)
+ /* Secure Camera pixel buffer */
+ search_str = camera_cb;
+ else if (flags & SMEM_CDSP)
+ search_str = dsp_cb;
+ search_str = non_secure_cb;
+ list_for_each_entry(cb, &res->context_banks, list) {
+ if (cb->is_secure == is_secure &&
+ !strcmp(search_str, cb->name)) {
+ match = cb;
+ if (!match)
+ "%s: cb not found for flags %x, is_secure %d\n",
+ __func__, flags, is_secure);
+ return match;
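+ /*
+ * Selection sketch (illustrative), summarizing the lookup above:
+ * SMEM_PIXEL maps to "cvp_sec_pixel", SMEM_NON_PIXEL to "cvp_sec_nonpixel",
+ * SMEM_CAMERA to "cvp_camera", SMEM_CDSP to "cvp_dsp", and anything else to
+ * "cvp_hlos"; the chosen bank must also match (flags & SMEM_SECURE).
+ * A hypothetical caller:
+ *
+ *	cb = msm_cvp_smem_get_context_bank(SMEM_SECURE | SMEM_PIXEL);
+ *	if (!cb)
+ *		return -ENOENT;
+ */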
+int msm_cvp_map_ipcc_regs(u32 *iova)
+ struct cvp_hfi_ops *ops_tbl;
+ phys_addr_t paddr;
+ if (core) {
+ ops_tbl = core->dev_ops;
+ if (ops_tbl)
+ dev = ops_tbl->hfi_device_data;
+ paddr = dev->res->ipcc_reg_base;
+ size = dev->res->ipcc_reg_size;
+ if (!paddr || !size)
+ dprintk(CVP_ERR, "%s: fail to get context bank\n", __func__);
+ *iova = dma_map_resource(cb->dev, paddr, size, DMA_BIDIRECTIONAL, 0);
+ if (*iova == DMA_MAPPING_ERROR) {
+ dprintk(CVP_WARN, "%s: fail to map IPCC regs\n", __func__);
+ return -EFAULT;
+int msm_cvp_unmap_ipcc_regs(u32 iova)
+ if (!iova || !size)
+ dma_unmap_resource(cb->dev, iova, size, DMA_BIDIRECTIONAL, 0);
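+ /*
+ * Pattern sketch (illustrative): mapping MMIO such as the IPCC register
+ * block into the IOMMU uses dma_map_resource()/dma_unmap_resource() rather
+ * than the streaming page APIs, and failure is reported through
+ * DMA_MAPPING_ERROR instead of a NULL or ERR pointer:
+ *
+ *	dma_addr_t iova = dma_map_resource(cb->dev, paddr, size,
+ *			DMA_BIDIRECTIONAL, 0);
+ *	if (iova == DMA_MAPPING_ERROR)
+ *		return -EFAULT;
+ *	...
+ *	dma_unmap_resource(cb->dev, iova, size, DMA_BIDIRECTIONAL, 0);
+ */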
@@ -0,0 +1,86 @@
+ * SPDX-License-Identifier: GPL-2.0-only
+ * This file contains definitions shared between the UMD and KMD that
+ * are deliberately kept out of the UAPI to preserve UAPI backward
+ * compatibility. Identical copies of this file must be used by both
+ * UMD and KMD for correct operation.
+#ifndef _EVA_SHARED_DEF_H_
+#define _EVA_SHARED_DEF_H_
+ * Structure corresponding to HFI_CVP_BUFFER_TYPE
+struct cvp_buf_type {
+ __s32 fd;
+ __u32 reserved1;
+ __u32 reserved2;
+ __u32 fence_type;
+ __u32 input_handle;
+ __u32 output_handle;
+ __u32 debug_flags;
+ __u32 crc;
+ * Structures and macros for Warp-NCC Out-of-Band (OOB) buffer
+#define EVA_KMD_WNCC_MAX_LAYERS 4
+#define EVA_KMD_WNCC_MAX_ADDRESSES 4095
+#define EVA_KMD_WNCC_MAX_SRC_BUFS 2400
+#define EVA_KMD_WNCC_SRC_BUF_ID_OFFSET 1
+struct eva_kmd_wncc_metadata {
+ __u64 loc_x_dec : 12;
+ __u64 loc_x_frac : 9;
+ __u64 loc_y_dec : 12;
+ __u64 loc_y_frac : 9;
+ __u64 iova_lsb : 22; /* Populated in KMD */
+ __u64 iova_msb : 10; /* Populated in KMD */
+ __u64 scale_idx : 2;
+ __s64 aff_coeff_3 : 13;
+ __s64 aff_coeff_2 : 13;
+ __s64 aff_coeff_1 : 13;
+ __s64 aff_coeff_0 : 13;
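+ /*
+ * Packing sketch (illustrative): iova_lsb (22 bits) and iova_msb (10 bits)
+ * together carry a 32-bit device address that the KMD fills in.
+ * Hypothetical helpers, not part of the original interface:
+ *
+ *	static inline __u32 wncc_meta_iova(const struct eva_kmd_wncc_metadata *m)
+ *	{
+ *		return (__u32)m->iova_lsb | ((__u32)m->iova_msb << 22);
+ *	}
+ *
+ *	static inline void wncc_meta_set_iova(struct eva_kmd_wncc_metadata *m,
+ *			__u32 iova)
+ *	{
+ *		m->iova_lsb = iova & ((1U << 22) - 1);
+ *		m->iova_msb = iova >> 22;
+ *	}
+ */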
+struct eva_kmd_oob_wncc {
+ __u32 metadata_bufs_offset;
+ __u32 num_layers;
+ struct eva_kmd_wncc_layer {
+ __u32 num_addrs;
+ struct eva_kmd_wncc_addr {
+ __u32 buffer_id;
+ } addrs[EVA_KMD_WNCC_MAX_ADDRESSES];
+ } layers[EVA_KMD_WNCC_MAX_LAYERS];
+ * Structure and macros for Out-of-Band (OOB) buffer
+ * that may accompany HFI packet data
+#define EVA_KMD_OOB_INVALID 0
+#define EVA_KMD_OOB_WNCC 1
+struct eva_kmd_oob_buf {
+ __u32 oob_type;
+ struct eva_kmd_oob_wncc wncc;
@@ -0,0 +1,463 @@
+/* Set up look-up tables to convert HAL_* to HFI_*.
+ * The tables below take advantage of the fact that most HAL_* types
+ * are defined bitwise. Indexing them directly would therefore produce
+ * huge, mostly empty arrays, so we apply log2 before indexing to get
+ * a more compact index.
+int cvp_create_pkt_cmd_sys_init(struct cvp_hfi_cmd_sys_init_packet *pkt,
+ u32 arch_type)
+ if (!pkt)
+ pkt->packet_type = HFI_CMD_SYS_INIT;
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_init_packet);
+ pkt->arch_type = arch_type;
+int cvp_create_pkt_cmd_sys_pc_prep(struct cvp_hfi_cmd_sys_pc_prep_packet *pkt)
+ pkt->packet_type = HFI_CMD_SYS_PC_PREP;
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_pc_prep_packet);
+int cvp_create_pkt_cmd_sys_debug_config(
+ struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+ u32 mode)
+ struct cvp_hfi_debug_config *hfi;
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_property_packet) +
+ sizeof(struct cvp_hfi_debug_config) + sizeof(u32);
+ pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+ hfi = (struct cvp_hfi_debug_config *) &pkt->rg_property_data[1];
+ hfi->debug_config = mode;
+ hfi->debug_mode = HFI_DEBUG_MODE_QUEUE;
+ if (msm_cvp_fw_debug_mode
+ <= (HFI_DEBUG_MODE_QUEUE | HFI_DEBUG_MODE_QDSS))
+ hfi->debug_mode = msm_cvp_fw_debug_mode;
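+ /*
+ * Layout sketch (illustrative): a set-property packet carries the property
+ * id in rg_property_data[0] and the payload starting at rg_property_data[1],
+ * which appears to be why the size above adds both the payload struct and
+ * one extra u32 on top of the base packet:
+ *
+ *	pkt->size = sizeof(*pkt)			// base packet
+ *		+ sizeof(struct cvp_hfi_debug_config)	// payload at [1]
+ *		+ sizeof(u32);				// property id at [0]
+ *
+ * The coverage, idle-indicator, power-control and UBWC property packets
+ * below follow the same pattern.
+ */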
+int cvp_create_pkt_cmd_sys_coverage_config(
+ dprintk(CVP_ERR, "In %s(), No input packet\n", __func__);
+ sizeof(u32);
+ pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
+ pkt->rg_property_data[1] = mode;
+ dprintk(CVP_PKT, "Firmware coverage mode %d\n",
+ pkt->rg_property_data[1]);
+int cvp_create_pkt_cmd_sys_set_idle_indicator(
+ pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
+ dprintk(CVP_PKT, "Firmware idle indicator mode %d\n",
+int cvp_create_pkt_cmd_sys_set_resource(
+ struct cvp_hfi_cmd_sys_set_resource_packet *pkt,
+ struct cvp_resource_hdr *res_hdr,
+ void *res_value)
+ u32 i = 0;
+ if (!pkt || !res_hdr || !res_value) {
+ "Invalid paramas pkt %pK res_hdr %pK res_value %pK\n",
+ pkt, res_hdr, res_value);
+ pkt->packet_type = HFI_CMD_SYS_SET_RESOURCE;
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_set_resource_packet);
+ pkt->resource_handle = hash32_ptr(res_hdr->resource_handle);
+ switch (res_hdr->resource_id) {
+ case CVP_RESOURCE_SYSCACHE:
+ struct cvp_hfi_resource_syscache_info_type *res_sc_info =
+ (struct cvp_hfi_resource_syscache_info_type *)res_value;
+ struct cvp_hfi_resource_subcache_type *res_sc =
+ (struct cvp_hfi_resource_subcache_type *)
+ &(res_sc_info->rg_subcache_entries[0]);
+ struct cvp_hfi_resource_syscache_info_type *hfi_sc_info =
+ (struct cvp_hfi_resource_syscache_info_type *)
+ &pkt->rg_resource_data[0];
+ struct cvp_hfi_resource_subcache_type *hfi_sc =
+ &(hfi_sc_info->rg_subcache_entries[0]);
+ pkt->resource_type = HFI_RESOURCE_SYSCACHE;
+ hfi_sc_info->num_entries = res_sc_info->num_entries;
+ pkt->size += (sizeof(struct cvp_hfi_resource_subcache_type))
+ * hfi_sc_info->num_entries;
+ for (i = 0; i < hfi_sc_info->num_entries; i++) {
+ hfi_sc[i] = res_sc[i];
+ dprintk(CVP_PKT, "entry hfi#%d, sc_id %d, size %d\n",
+ i, hfi_sc[i].sc_id, hfi_sc[i].size);
+ "Invalid resource_id %d\n", res_hdr->resource_id);
+ rc = -ENOTSUPP;
+int cvp_create_pkt_cmd_sys_release_resource(
+ struct cvp_hfi_cmd_sys_release_resource_packet *pkt,
+ struct cvp_resource_hdr *res_hdr)
+ if (!pkt || !res_hdr) {
+ "Invalid paramas pkt %pK res_hdr %pK\n",
+ pkt, res_hdr);
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_release_resource_packet);
+ pkt->packet_type = HFI_CMD_SYS_RELEASE_RESOURCE;
+ dprintk(CVP_PKT,
+ "rel_res: pkt_type 0x%x res_type 0x%x prepared\n",
+ pkt->packet_type, pkt->resource_type);
+inline int cvp_create_pkt_cmd_sys_session_init(
+ struct cvp_hfi_cmd_sys_session_init_packet *pkt,
+ struct cvp_hal_session *session)
+ struct msm_cvp_inst *inst = session->session_id;
+ if (!pkt || !inst)
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_session_init_packet);
+ pkt->packet_type = HFI_CMD_SYS_SESSION_INIT;
+ pkt->session_id = hash32_ptr(session);
+ pkt->session_type = inst->prop.type;
+ pkt->session_kmask = inst->prop.kernel_mask;
+ pkt->session_prio = inst->prop.priority;
+ pkt->is_secure = inst->prop.is_secure;
+ pkt->dsp_ac_mask = inst->prop.dsp_mask;
+static int create_pkt_cmd_sys_ubwc_config(
+ struct msm_cvp_ubwc_config_data *ubwc_config)
+ struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type *hfi;
+ sizeof(struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type)
+ + sizeof(u32);
+ pkt->rg_property_data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG;
+ hfi = (struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type *)
+ &pkt->rg_property_data[1];
+ hfi->max_channels = ubwc_config->max_channels;
+ hfi->override_bit_info.max_channel_override =
+ ubwc_config->override_bit_info.max_channel_override;
+ hfi->mal_length = ubwc_config->mal_length;
+ hfi->override_bit_info.mal_length_override =
+ ubwc_config->override_bit_info.mal_length_override;
+ hfi->highest_bank_bit = ubwc_config->highest_bank_bit;
+ hfi->override_bit_info.hb_override =
+ ubwc_config->override_bit_info.hb_override;
+ hfi->bank_swzl_level = ubwc_config->bank_swzl_level;
+ hfi->override_bit_info.bank_swzl_level_override =
+ ubwc_config->override_bit_info.bank_swzl_level_override;
+ hfi->bank_spreading = ubwc_config->bank_spreading;
+ hfi->override_bit_info.bank_spreading_override =
+ ubwc_config->override_bit_info.bank_spreading_override;
+ hfi->size = sizeof(struct cvp_hfi_cmd_sys_set_ubwc_config_packet_type);
+int cvp_create_pkt_cmd_session_cmd(struct cvp_hal_session_cmd_pkt *pkt,
+ int pkt_type, struct cvp_hal_session *session)
+ pkt->size = sizeof(struct cvp_hal_session_cmd_pkt);
+ pkt->packet_type = pkt_type;
+int cvp_create_pkt_cmd_sys_power_control(
+ struct cvp_hfi_cmd_sys_set_property_packet *pkt, u32 enable)
+ struct cvp_hfi_enable *hfi;
+ dprintk(CVP_ERR, "No input packet\n");
+ sizeof(struct cvp_hfi_enable) + sizeof(u32);
+ pkt->rg_property_data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+ hfi = (struct cvp_hfi_enable *) &pkt->rg_property_data[1];
+ hfi->enable = enable;
+int cvp_create_pkt_cmd_session_set_buffers(
+ void *cmd,
+ struct cvp_hal_session *session,
+ u32 iova,
+ u32 size)
+ struct cvp_hfi_cmd_session_set_buffers_packet *pkt;
+ if (!cmd || !session)
+ pkt = (struct cvp_hfi_cmd_session_set_buffers_packet *)cmd;
+ pkt->packet_type = HFI_CMD_SESSION_CVP_SET_BUFFERS;
+ pkt->buf_type.iova = iova;
+ pkt->buf_type.size = size;
+ pkt->size = sizeof(struct cvp_hfi_cmd_session_set_buffers_packet);
+int cvp_create_pkt_cmd_session_release_buffers(
+ struct cvp_session_release_buffers_packet *pkt;
+ if (!cmd || !session || session == (void *)0xdeadbeef)
+ pkt = (struct cvp_session_release_buffers_packet *)cmd;
+ pkt->packet_type = HFI_CMD_SESSION_CVP_RELEASE_BUFFERS;
+ pkt->num_buffers = 1;
+ pkt->buffer_type = 0;
+ pkt->size = sizeof(struct cvp_session_release_buffers_packet) +
+ ((pkt->num_buffers - 1) * sizeof(u32));
+int cvp_create_pkt_cmd_session_send(
+ struct eva_kmd_hfi_packet *out_pkt,
+ int def_idx;
+ struct cvp_hal_session_cmd_pkt *ptr =
+ (struct cvp_hal_session_cmd_pkt *)in_pkt;
+ if (!out_pkt || !in_pkt || !session)
+ if (ptr->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int))
+ goto error_hfi_packet;
+ if (ptr->session_id != hash32_ptr(session))
+ def_idx = get_pkt_index(ptr);
+ if (def_idx < 0) {
+ memcpy(out_pkt, in_pkt, ptr->size);
+ if (cvp_hfi_defs[def_idx].type != ptr->packet_type)
+error_hfi_packet:
+ dprintk(CVP_ERR, "%s incorrect packet: size=%d type=%d sessionid=%d\n",
+ __func__, ptr->size, ptr->packet_type, ptr->session_id);
+static int get_hfi_ssr_type(enum hal_ssr_trigger_type type)
+ int rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+ switch (type) {
+ case SSR_ERR_FATAL:
+ rc = HFI_TEST_SSR_SW_ERR_FATAL;
+ case SSR_SW_DIV_BY_ZERO:
+ rc = HFI_TEST_SSR_SW_DIV_BY_ZERO;
+ case SSR_HW_WDOG_IRQ:
+ rc = HFI_TEST_SSR_HW_WDOG_IRQ;
+ "SSR trigger type not recognized, using WDOG.\n");
+int cvp_create_pkt_ssr_cmd(enum hal_ssr_trigger_type type,
+ struct cvp_hfi_cmd_sys_test_ssr_packet *pkt)
+ dprintk(CVP_ERR, "Invalid params, device: %pK\n", pkt);
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_test_ssr_packet);
+ pkt->packet_type = HFI_CMD_SYS_TEST_SSR;
+ pkt->trigger_type = get_hfi_ssr_type(type);
+int cvp_create_pkt_cmd_sys_image_version(
+ struct cvp_hfi_cmd_sys_get_property_packet *pkt)
+ dprintk(CVP_ERR, "%s invalid param :%pK\n", __func__, pkt);
+ pkt->size = sizeof(struct cvp_hfi_cmd_sys_get_property_packet);
+ pkt->packet_type = HFI_CMD_SYS_GET_PROPERTY;
+ pkt->rg_property_data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+static struct cvp_hfi_packetization_ops hfi_default = {
+ .sys_init = cvp_create_pkt_cmd_sys_init,
+ .sys_pc_prep = cvp_create_pkt_cmd_sys_pc_prep,
+ .sys_power_control = cvp_create_pkt_cmd_sys_power_control,
+ .sys_set_resource = cvp_create_pkt_cmd_sys_set_resource,
+ .sys_debug_config = cvp_create_pkt_cmd_sys_debug_config,
+ .sys_coverage_config = cvp_create_pkt_cmd_sys_coverage_config,
+ .sys_set_idle_indicator = cvp_create_pkt_cmd_sys_set_idle_indicator,
+ .sys_release_resource = cvp_create_pkt_cmd_sys_release_resource,
+ .sys_image_version = cvp_create_pkt_cmd_sys_image_version,
+ .sys_ubwc_config = create_pkt_cmd_sys_ubwc_config,
+ .ssr_cmd = cvp_create_pkt_ssr_cmd,
+ .session_init = cvp_create_pkt_cmd_sys_session_init,
+ .session_cmd = cvp_create_pkt_cmd_session_cmd,
+ .session_set_buffers =
+ cvp_create_pkt_cmd_session_set_buffers,
+ .session_release_buffers =
+ cvp_create_pkt_cmd_session_release_buffers,
+ .session_send = cvp_create_pkt_cmd_session_send,
+struct cvp_hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
+ enum hfi_packetization_type type)
+ dprintk(CVP_HFI, "%s selected\n",
+ type == HFI_PACKETIZATION_4XX ?
+ "4xx packetization" : "Unknown hfi");
+ case HFI_PACKETIZATION_4XX:
+ return &hfi_default;
@@ -0,0 +1,75 @@
+#ifndef __HFI_PACKETIZATION__
+#define __HFI_PACKETIZATION__
+#define call_hfi_pkt_op(q, op, ...) \
+ (((q) && (q)->pkt_ops && (q)->pkt_ops->op) ? \
+ ((q)->pkt_ops->op(__VA_ARGS__)) : 0)
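+/*
+ * Usage sketch (illustrative): the macro guards against a missing ops table
+ * or callback and otherwise forwards its variadic arguments, e.g.
+ *
+ *	rc = call_hfi_pkt_op(dev, sys_init, pkt, arch_type);
+ *
+ * expands to dev->pkt_ops->sys_init(pkt, arch_type) when the callback is
+ * populated and evaluates to 0 otherwise; 'dev' here stands for any object
+ * carrying a pkt_ops pointer of type struct cvp_hfi_packetization_ops.
+ */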
+enum hfi_packetization_type {
+ HFI_PACKETIZATION_4XX,
+struct cvp_hfi_packetization_ops {
+ int (*sys_init)(struct cvp_hfi_cmd_sys_init_packet *pkt, u32 arch_type);
+ int (*sys_pc_prep)(struct cvp_hfi_cmd_sys_pc_prep_packet *pkt);
+ int (*sys_power_control)(
+ u32 enable);
+ int (*sys_set_resource)(
+ struct cvp_resource_hdr *resource_hdr,
+ void *resource_value);
+ int (*sys_debug_config)(struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+ u32 mode);
+ int (*sys_coverage_config)(
+ int (*sys_set_idle_indicator)(
+ int (*sys_release_resource)(
+ struct cvp_resource_hdr *resource_hdr);
+ int (*sys_image_version)(
+ struct cvp_hfi_cmd_sys_get_property_packet *pkt);
+ int (*sys_ubwc_config)(struct cvp_hfi_cmd_sys_set_property_packet *pkt,
+ struct msm_cvp_ubwc_config_data *ubwc_config);
+ int (*ssr_cmd)(enum hal_ssr_trigger_type type,
+ struct cvp_hfi_cmd_sys_test_ssr_packet *pkt);
+ int (*session_init)(
+ struct cvp_hal_session *session);
+ int (*session_cmd)(struct cvp_hal_session_cmd_pkt *pkt,
+ int pkt_type, struct cvp_hal_session *session);
+ int (*session_set_buffers)(
+ void *pkt,
+ u32 size);
+ int (*session_release_buffers)(
+ int (*session_get_buf_req)(
+ struct cvp_hfi_cmd_session_get_property_packet *pkt,
+ int (*session_sync_process)(
+ struct cvp_hfi_cmd_session_sync_process_packet *pkt,
+ int (*session_send)(
+ struct eva_kmd_hfi_packet *in_pkt);
+ enum hfi_packetization_type);
@@ -0,0 +1,748 @@
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+#include <linux/bitops.h>
+extern struct msm_cvp_drv *cvp_driver;
+static enum cvp_status hfi_map_err_status(u32 hfi_err)
+ enum cvp_status cvp_err;
+ switch (hfi_err) {
+ case HFI_ERR_NONE:
+ cvp_err = CVP_ERR_NONE;
+ case HFI_ERR_SYS_FATAL:
+ cvp_err = CVP_ERR_HW_FATAL;
+ case HFI_ERR_SYS_NOC_ERROR:
+ cvp_err = CVP_ERR_NOC_ERROR;
+ case HFI_ERR_SYS_VERSION_MISMATCH:
+ case HFI_ERR_SYS_INVALID_PARAMETER:
+ case HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE:
+ case HFI_ERR_SESSION_INVALID_PARAMETER:
+ case HFI_ERR_SESSION_INVALID_SESSION_ID:
+ case HFI_ERR_SESSION_INVALID_STREAM_ID:
+ cvp_err = CVP_ERR_BAD_PARAM;
+ case HFI_ERR_SYS_INSUFFICIENT_RESOURCES:
+ case HFI_ERR_SESSION_UNSUPPORTED_PROPERTY:
+ case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+ case HFI_ERR_SESSION_INSUFFICIENT_RESOURCES:
+ case HFI_ERR_SESSION_UNSUPPORTED_STREAM:
+ cvp_err = CVP_ERR_NOT_SUPPORTED;
+ case HFI_ERR_SYS_MAX_SESSIONS_REACHED:
+ cvp_err = CVP_ERR_MAX_CLIENTS;
+ case HFI_ERR_SYS_SESSION_IN_USE:
+ cvp_err = CVP_ERR_CLIENT_PRESENT;
+ case HFI_ERR_SESSION_FATAL:
+ cvp_err = CVP_ERR_CLIENT_FATAL;
+ case HFI_ERR_SESSION_BAD_POINTER:
+ case HFI_ERR_SESSION_INCORRECT_STATE_OPERATION:
+ cvp_err = CVP_ERR_BAD_STATE;
+ cvp_err = CVP_ERR_FAIL;
+ return cvp_err;
+static int hfi_process_sys_error(u32 device_id,
+ struct cvp_hfi_msg_event_notify_packet *pkt,
+ struct msm_cvp_cb_info *info)
+ cmd_done.device_id = device_id;
+ cmd_done.status = hfi_map_err_status(pkt->event_data1);
+ info->response_type = HAL_SYS_ERROR;
+ info->response.cmd = cmd_done;
+ dprintk(CVP_ERR, "Received FW sys error %#x\n", pkt->event_data1);
+static int hfi_process_session_error(u32 device_id,
+ cmd_done.session_id = (void *)(uintptr_t)pkt->session_id;
+ cmd_done.size = pkt->event_data2;
+ dprintk(CVP_WARN, "Received: SESSION_ERROR with event data 1 2: %#x %#x\n",
+ pkt->event_data1, pkt->event_data2);
+ switch (pkt->event_data1) {
+ /* Ignore below errors */
+ case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+ case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+ dprintk(CVP_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
+ info->response_type = HAL_RESPONSE_UNUSED;
+ "%s: session %x id %#x, data1 %#x, data2 %#x\n",
+ __func__, pkt->session_id, pkt->event_id,
+ info->response_type = HAL_SESSION_ERROR;
+static int hfi_process_event_notify(u32 device_id,
+ void *hdr, struct msm_cvp_cb_info *info)
+ struct cvp_hfi_msg_event_notify_packet *pkt =
+ (struct cvp_hfi_msg_event_notify_packet *)hdr;
+ dprintk(CVP_HFI, "Received: EVENT_NOTIFY\n");
+ if (pkt->size < sizeof(struct cvp_hfi_msg_event_notify_packet)) {
+ return -E2BIG;
+ switch (pkt->event_id) {
+ case HFI_EVENT_SYS_ERROR:
+ dprintk(CVP_ERR, "HFI_EVENT_SYS_ERROR: %d, %#x\n",
+ return hfi_process_sys_error(device_id, pkt, info);
+ case HFI_EVENT_SESSION_ERROR:
+ return hfi_process_session_error(device_id, pkt, info);
+ *info = (struct msm_cvp_cb_info) {
+ .response_type = HAL_RESPONSE_UNUSED,
+static int hfi_process_sys_init_done(u32 device_id,
+ struct cvp_hfi_msg_sys_init_done_packet *pkt =
+ (struct cvp_hfi_msg_sys_init_done_packet *)hdr;
+ enum cvp_status status = CVP_ERR_NONE;
+ dprintk(CVP_CORE, "RECEIVED: SYS_INIT_DONE\n");
+ if (sizeof(struct cvp_hfi_msg_sys_init_done_packet) > pkt->size) {
+ dprintk(CVP_ERR, "%s: bad_pkt_size: %d\n", __func__,
+ pkt->size);
+ if (!pkt->num_properties) {
+ "hal_process_sys_init_done: no_properties\n");
+ goto err_no_prop;
+ status = hfi_map_err_status(pkt->error_type);
+ if (status) {
+ dprintk(CVP_ERR, "%s: status %#x hfi type %#x err %#x\n",
+ __func__, status, pkt->packet_type, pkt->error_type);
+err_no_prop:
+ cmd_done.session_id = NULL;
+ cmd_done.status = (u32)status;
+ cmd_done.size = sizeof(struct cvp_hal_sys_init_done);
+ info->response_type = HAL_SYS_INIT_DONE;
+ struct cvp_hal_sys_init_done *sys_init_done)
+ u32 rem_bytes, num_properties;
+ u8 *data_ptr;
+ if (!pkt || !sys_init_done) {
+ "hfi_msg_sys_init_done: Invalid input\n");
+ return CVP_ERR_FAIL;
+ rem_bytes = pkt->size - sizeof(struct
+ cvp_hfi_msg_sys_init_done_packet) + sizeof(u32);
+ if (!rem_bytes) {
+ "hfi_msg_sys_init_done: missing_prop_info\n");
+ return status;
+ data_ptr = (u8 *) &pkt->rg_property_data[0];
+ num_properties = pkt->num_properties;
+ "%s: data_start %pK, num_properties %#x\n",
+ __func__, data_ptr, num_properties);
+ sys_init_done->capabilities = NULL;
+static int hfi_process_session_init_done(u32 device_id,
+ struct cvp_hfi_msg_sys_session_init_done_packet *pkt =
+ (struct cvp_hfi_msg_sys_session_init_done_packet *)hdr;
+ struct cvp_hal_session_init_done session_init_done = { {0} };
+ dprintk(CVP_SESS, "RECEIVED: SESSION_INIT_DONE[%x]\n", pkt->session_id);
+ if (sizeof(struct cvp_hfi_msg_sys_session_init_done_packet)
+ > pkt->size) {
+ "hal_process_session_init_done: bad_pkt_size\n");
+ cmd_done.status = hfi_map_err_status(pkt->error_type);
+ if (cmd_done.status)
+ __func__, cmd_done.status, pkt->packet_type, pkt->error_type);
+ cmd_done.data.session_init_done = session_init_done;
+ cmd_done.size = sizeof(struct cvp_hal_session_init_done);
+ info->response_type = HAL_SESSION_INIT_DONE;
+static int hfi_process_session_end_done(u32 device_id,
+ struct cvp_hfi_msg_sys_session_end_done_packet *pkt =
+ (struct cvp_hfi_msg_sys_session_end_done_packet *)hdr;
+ dprintk(CVP_SESS, "RECEIVED: SESSION_END_DONE[%#x]\n", pkt->session_id);
+ if (!pkt || pkt->size !=
+ sizeof(struct cvp_hfi_msg_sys_session_end_done_packet)) {
+ dprintk(CVP_ERR, "%s: bad packet/packet size\n", __func__);
+ cmd_done.size = 0;
+ info->response_type = HAL_SESSION_END_DONE;
+static int hfi_process_session_abort_done(u32 device_id,
+ struct cvp_hfi_msg_sys_session_abort_done_packet *pkt =
+ (struct cvp_hfi_msg_sys_session_abort_done_packet *)hdr;
+ dprintk(CVP_SESS, "RECEIVED: SESSION_ABORT_DONE[%#x]\n",
+ pkt->session_id);
+ sizeof(struct cvp_hfi_msg_sys_session_abort_done_packet)) {
+ dprintk(CVP_ERR, "%s: bad packet/packet size: %d\n",
+ __func__, pkt ? pkt->size : 0);
+ info->response_type = HAL_SESSION_ABORT_DONE;
+static int hfi_process_session_set_buf_done(u32 device_id,
+ struct cvp_hfi_msg_session_hdr *pkt =
+ unsigned int pkt_size = get_msg_size(pkt);
+ if (!pkt || pkt->size < pkt_size) {
+ dprintk(CVP_ERR, "bad packet/packet size %d\n",
+ pkt ? pkt->size : 0);
+ dprintk(CVP_SESS, "RECEIVED:CVP_SET_BUFFER_DONE[%#x]\n",
+ cmd_done.session_id = (void *)(uintptr_t)get_msg_session_id(pkt);
+ cmd_done.status = hfi_map_err_status(get_msg_errorcode(pkt));
+ info->response_type = HAL_SESSION_SET_BUFFER_DONE;
+static int hfi_process_session_flush_done(u32 device_id,
+ struct cvp_hfi_msg_sys_session_ctrl_done_packet *pkt =
+ (struct cvp_hfi_msg_sys_session_ctrl_done_packet *)hdr;
+ dprintk(CVP_SESS, "RECEIVED: SESSION_FLUSH_DONE[%#x]\n",
+ if (!pkt || pkt->size <
+ sizeof(struct cvp_hfi_msg_sys_session_ctrl_done_packet)) {
+ info->response_type = HAL_SESSION_FLUSH_DONE;
+static int hfi_process_session_start_done(u32 device_id,
+ dprintk(CVP_SESS, "RECEIVED: SESSION_START_DONE[%#x]\n",
+ info->response_type = HAL_SESSION_START_DONE;
+static int hfi_process_session_stop_done(u32 device_id,
+ dprintk(CVP_SESS, "RECEIVED: SESSION_STOP_DONE[%#x]\n",
+ info->response_type = HAL_SESSION_STOP_DONE;
+static int hfi_process_session_rel_buf_done(u32 device_id,
+ dprintk(CVP_SESS, "RECEIVED:CVP_RELEASE_BUFFER_DONE[%#x]\n",
+ info->response_type = HAL_SESSION_RELEASE_BUFFER_DONE;
+static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
+ unsigned int session_id)
+ struct msm_cvp_inst *inst = NULL;
+ bool match = false;
+ if (!core || !session_id)
+retry:
+ if (mutex_trylock(&core->lock)) {
+ if (hash32_ptr(inst->session) == session_id) {
+ match = true;
+ inst = match && kref_get_unless_zero(&inst->kref) ? inst : NULL;
+ mutex_unlock(&core->lock);
+ if (core->state == CVP_CORE_UNINIT)
+ if (count < 1000)
+ goto retry;
+ dprintk(CVP_ERR, "timeout locking core mutex\n");
+ return inst;
+static int hfi_process_session_dump_notify(u32 device_id,
+ struct cvp_session_prop *session_prop;
+ unsigned int session_id;
+ struct cvp_hfi_dumpmsg_session_hdr *pkt =
+ (struct cvp_hfi_dumpmsg_session_hdr *)hdr;
+ dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+ } else if (pkt->size > sizeof(struct cvp_hfi_dumpmsg_session_hdr)) {
+ dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+ session_id = get_msg_session_id(pkt);
+ inst = cvp_get_inst_from_id(core, session_id);
+ dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+ session_prop = &inst->prop;
+ session_prop->dump_offset = pkt->dump_offset;
+ session_prop->dump_size = pkt->dump_size;
+ dprintk(CVP_SESS, "RECEIVED: SESSION_DUMP[%x]\n", session_id);
+ info->response_type = HAL_SESSION_DUMP_NOTIFY;
+ cvp_put_inst(inst);
+static int hfi_process_session_cvp_msg(u32 device_id,
+ struct cvp_session_msg *sess_msg;
+ struct cvp_session_queue *sq;
+ } else if (pkt->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int)) {
+ if (pkt->client_data.kdata & FENCE_BIT)
+ sq = &inst->session_queue_fence;
+ sq = &inst->session_queue;
+ sess_msg = cvp_kmem_cache_zalloc(&cvp_driver->msg_cache, GFP_KERNEL);
+ if (sess_msg == NULL) {
+ dprintk(CVP_ERR, "%s runs out msg cache memory\n", __func__);
+ goto error_no_mem;
+ memcpy(&sess_msg->pkt, pkt, get_msg_size(pkt));
+ "%s: Received msg %x cmd_done.status=%d sessionid=%x\n",
+ __func__, pkt->packet_type,
+ hfi_map_err_status(get_msg_errorcode(pkt)), session_id);
+ spin_lock(&sq->lock);
+ if (sq->msg_count >= MAX_NUM_MSGS_PER_SESSION) {
+ dprintk(CVP_ERR, "Reached session queue size limit\n");
+ goto error_handle_msg;
+ list_add_tail(&sess_msg->node, &sq->msgs);
+ sq->msg_count++;
+ spin_unlock(&sq->lock);
+ wake_up_all(&sq->wq);
+ info->response_type = HAL_NO_RESP;
+error_handle_msg:
+ cvp_kmem_cache_free(&cvp_driver->msg_cache, sess_msg);
+error_no_mem:
+static void hfi_process_sys_get_prop_image_version(
+ struct cvp_hfi_msg_sys_property_info_packet *pkt)
+ const u32 version_string_size = 128;
+ u8 *str_image_version;
+ int req_bytes;
+ req_bytes = pkt->size - sizeof(*pkt);
+ if (req_bytes < (signed int)version_string_size ||
+ !pkt->rg_property_data[1] ||
+ pkt->num_properties > 1) {
+ dprintk(CVP_ERR, "%s: bad_pkt: %d\n", __func__, req_bytes);
+ str_image_version = (u8 *)&pkt->rg_property_data[1];
+ * The version string returned by firmware includes null
+ * characters at the start and in between. Replace the null
+ * characters with spaces so the version info can be printed.
+ for (i = 0; i < version_string_size; i++) {
+ if (str_image_version[i] != '\0')
+ cvp_driver->fw_version[i] = str_image_version[i];
+ cvp_driver->fw_version[i] = ' ';
+ cvp_driver->fw_version[i - 1] = '\0';
+ dprintk(CVP_HFI, "F/W version: %s\n", cvp_driver->fw_version);
+static int hfi_process_sys_property_info(u32 device_id,
+ struct cvp_hfi_msg_sys_property_info_packet *pkt =
+ (struct cvp_hfi_msg_sys_property_info_packet *)hdr;
+ } else if (pkt->size > sizeof(*pkt)) {
+ "%s: bad_pkt_size %d\n", __func__, pkt->size);
+ } else if (!pkt->num_properties) {
+ "%s: no_properties\n", __func__);
+ switch (pkt->rg_property_data[0]) {
+ case HFI_PROPERTY_SYS_IMAGE_VERSION:
+ hfi_process_sys_get_prop_image_version(pkt);
+ "%s: unknown_prop_id: %x\n",
+ __func__, pkt->rg_property_data[0]);
+int cvp_hfi_process_msg_packet(u32 device_id, void *hdr,
+ typedef int (*pkt_func_def)(u32, void *, struct msm_cvp_cb_info *info);
+ pkt_func_def pkt_func = NULL;
+ struct cvp_hal_msg_pkt_hdr *msg_hdr = (struct cvp_hal_msg_pkt_hdr *)hdr;
+ if (!info || !msg_hdr || msg_hdr->size < CVP_IFACEQ_MIN_PKT_SIZE) {
+ dprintk(CVP_ERR, "%s: bad packet/packet size\n",
+ dprintk(CVP_HFI, "Received HFI MSG with type %#x\n", msg_hdr->packet);
+ switch (msg_hdr->packet) {
+ case HFI_MSG_EVENT_NOTIFY:
+ pkt_func = (pkt_func_def)hfi_process_event_notify;
+ case HFI_MSG_SYS_INIT_DONE:
+ pkt_func = (pkt_func_def)hfi_process_sys_init_done;
+ case HFI_MSG_SYS_SESSION_INIT_DONE:
+ pkt_func = (pkt_func_def)hfi_process_session_init_done;
+ case HFI_MSG_SYS_PROPERTY_INFO:
+ pkt_func = (pkt_func_def)hfi_process_sys_property_info;
+ case HFI_MSG_SYS_SESSION_END_DONE:
+ pkt_func = (pkt_func_def)hfi_process_session_end_done;
+ case HFI_MSG_SESSION_CVP_SET_BUFFERS:
+ pkt_func = (pkt_func_def) hfi_process_session_set_buf_done;
+ case HFI_MSG_SESSION_CVP_RELEASE_BUFFERS:
+ pkt_func = (pkt_func_def)hfi_process_session_rel_buf_done;
+ case HFI_MSG_SYS_SESSION_ABORT_DONE:
+ pkt_func = (pkt_func_def)hfi_process_session_abort_done;
+ case HFI_MSG_SESSION_CVP_FLUSH:
+ pkt_func = (pkt_func_def)hfi_process_session_flush_done;
+ case HFI_MSG_SESSION_EVA_START:
+ pkt_func = (pkt_func_def)hfi_process_session_start_done;
+ case HFI_MSG_SESSION_EVA_STOP:
+ pkt_func = (pkt_func_def)hfi_process_session_stop_done;
+ case HFI_MSG_EVENT_NOTIFY_SNAPSHOT_READY:
+ pkt_func = (pkt_func_def)hfi_process_session_dump_notify;
+ dprintk(CVP_HFI, "Use default msg handler: %#x\n",
+ msg_hdr->packet);
+ pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
+ return pkt_func ?
+ pkt_func(device_id, hdr, info) : -ENOTSUPP;
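+/*
+ * Dispatch sketch (illustrative): every handler above shares the shape
+ *
+ *	static int handler(u32 device_id, void *hdr,
+ *			struct msm_cvp_cb_info *info);
+ *
+ * so cvp_hfi_process_msg_packet() can select one through the pkt_func_def
+ * cast and invoke it uniformly. Unrecognized packet types fall through to
+ * hfi_process_session_cvp_msg(), which queues the raw message on the
+ * session (or fence) queue for a waiting caller to consume.
+ */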
@@ -0,0 +1,1708 @@
+static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
+ struct eva_kmd_hfi_packet *in_pkt,
+ unsigned int in_offset,
+ unsigned int in_buf_num);
+int msm_cvp_get_session_info(struct msm_cvp_inst *inst, u32 *session)
+ if (!inst || !inst->core || !session) {
+ *session = hash32_ptr(inst->session);
+ dprintk(CVP_SESS, "%s: id 0x%x\n", __func__, *session);
+static bool cvp_msg_pending(struct cvp_session_queue *sq,
+ struct cvp_session_msg **msg, u64 *ktid)
+ struct cvp_session_msg *mptr = NULL, *dummy;
+ bool result = false;
+ if (!sq)
+ if (sq->state == QUEUE_INIT || sq->state == QUEUE_INVALID) {
+ /* The session is being deleted */
+ *msg = NULL;
+ result = list_empty(&sq->msgs);
+ if (!result) {
+ mptr = list_first_entry(&sq->msgs,
+ struct cvp_session_msg,
+ node);
+ if (!ktid) {
+ if (mptr) {
+ list_del_init(&mptr->node);
+ sq->msg_count--;
+ result = true;
+ list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
+ if (*ktid == mptr->pkt.client_data.kdata) {
+ result = false;
+ if (result)
+ mptr = NULL;
+ *msg = mptr;
+ return !result;
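+ /*
+ * Predicate sketch (illustrative): cvp_msg_pending() serves as the
+ * wait_event_timeout() condition in cvp_wait_process_message() below. It
+ * returns true either when a matching message was dequeued into *msg or
+ * when the queue is being torn down (*msg left NULL), so the waiter wakes
+ * up and then distinguishes the two cases by checking msg:
+ *
+ *	if (wait_event_timeout(sq->wq,
+ *			cvp_msg_pending(sq, &msg, ktid), timeout) == 0)
+ *		rc = -ETIMEDOUT;	// nothing arrived in time
+ *	else if (!msg)
+ *		...			// queue draining, session going away
+ */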
+static int cvp_wait_process_message(struct msm_cvp_inst *inst,
+ struct cvp_session_queue *sq, u64 *ktid,
+ unsigned long timeout,
+ struct eva_kmd_hfi_packet *out)
+ struct cvp_session_msg *msg = NULL;
+ struct cvp_hfi_msg_session_hdr *hdr;
+ if (wait_event_timeout(sq->wq,
+ cvp_msg_pending(sq, &msg, ktid), timeout) == 0) {
+ dprintk(CVP_WARN, "session queue wait timeout\n");
+ if (inst && inst->core && inst->core->dev_ops &&
+ inst->state != MSM_CVP_CORE_INVALID)
+ print_hfi_queue_info(inst->core->dev_ops);
+ rc = -ETIMEDOUT;
+ if (msg == NULL) {
+ dprintk(CVP_WARN, "%s: queue state %d, msg cnt %d\n", __func__,
+ sq->state, sq->msg_count);
+ if (inst->state >= MSM_CVP_CLOSE_DONE ||
+ (sq->state != QUEUE_ACTIVE &&
+ sq->state != QUEUE_START)) {
+ msm_cvp_comm_kill_session(inst);
+ if (!out) {
+ cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
+ hdr = (struct cvp_hfi_msg_session_hdr *)&msg->pkt;
+ memcpy(out, &msg->pkt, get_msg_size(hdr));
+ if (hdr->client_data.kdata >= ARRAY_SIZE(cvp_hfi_defs))
+ msm_cvp_unmap_frame(inst, hdr->client_data.kdata);
+static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
+ struct eva_kmd_hfi_packet *out_pkt)
+ unsigned long wait_time;
+ dprintk(CVP_ERR, "%s invalid session\n", __func__);
+ wait_time = msecs_to_jiffies(
+ inst->core->resources.msm_cvp_hw_rsp_timeout);
+ rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);
+static int msm_cvp_session_process_hfi(
+ struct msm_cvp_inst *inst,
+ unsigned int in_buf_num)
+ int pkt_idx, rc = 0;
+ unsigned int offset = 0, buf_num = 0, signal;
+ struct cvp_hfi_cmd_session_hdr *pkt_hdr;
+ if (!inst || !inst->core || !in_pkt) {
+ if (inst->state == MSM_CVP_CORE_INVALID) {
+ dprintk(CVP_ERR, "sess %pK INVALIDim reject new HFIs\n", inst);
+ pkt_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+ __func__, pkt_hdr->packet_type,
+ pkt_hdr->session_id,
+ pkt_hdr->client_data.transaction_id,
+ pkt_hdr->client_data.kdata & (FENCE_BIT - 1));
+ pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+ if (pkt_idx < 0) {
+ dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+ in_pkt->pkt_data[0],
+ in_pkt->pkt_data[1]);
+ signal = cvp_hfi_defs[pkt_idx].resp;
+ is_config_pkt = cvp_hfi_defs[pkt_idx].is_config_pkt;
+ if (is_config_pkt)
+ pr_info_ratelimited(CVP_DBG_TAG "inst %pK config %s\n", "sess",
+ inst, cvp_hfi_defs[pkt_idx].name);
+ if (signal == HAL_NO_RESP) {
+ /* Frame packets are not allowed before the session starts */
+ if ((sq->state != QUEUE_START && !is_config_pkt) ||
+ (sq->state >= QUEUE_INVALID)) {
+ * An init packet is allowed when the queue is in the
+ * QUEUE_ACTIVE, QUEUE_START or QUEUE_STOP state.
+ * A frame packet is only allowed when the queue is in the
+ * QUEUE_START state.
+ dprintk(CVP_ERR, "%s: invalid queue state %d\n",
+ __func__, sq->state);
+ if (in_offset && in_buf_num) {
+ offset = in_offset;
+ buf_num = in_buf_num;
+ if (!is_buf_param_valid(buf_num, offset)) {
+ dprintk(CVP_ERR, "Incorrect buffer num and offset in cmd\n");
+ rc = msm_cvp_proc_oob(inst, in_pkt);
+ dprintk(CVP_ERR, "%s: failed to process OOB buffer", __func__);
+ rc = cvp_enqueue_pkt(inst, in_pkt, offset, buf_num);
+ dprintk(CVP_ERR, "Failed to enqueue pkt, inst %pK "
+ "pkt_type %08x ktid %llu transaction_id %u\n",
+ inst, pkt_hdr->packet_type,
+ pkt_hdr->client_data.kdata,
+ pkt_hdr->client_data.transaction_id);
+static bool cvp_fence_wait(struct cvp_fence_queue *q,
+ struct cvp_fence_command **fence,
+ enum queue_state *state)
+ struct cvp_fence_command *f;
+ if (!q)
+ *fence = NULL;
+ while (!mutex_trylock(&q->lock))
+ *state = q->state;
+ if (*state != QUEUE_START) {
+ mutex_unlock(&q->lock);
+ if (list_empty(&q->wait_list)) {
+ f = list_first_entry(&q->wait_list, struct cvp_fence_command, list);
+ list_del_init(&f->list);
+ list_add_tail(&f->list, &q->sched_list);
+ *fence = f;
+static int cvp_fence_proc(struct msm_cvp_inst *inst,
+ struct cvp_fence_command *fc,
+ struct cvp_hfi_cmd_session_hdr *pkt)
+ unsigned long timeout;
+ u64 ktid;
+ int synx_state = SYNX_STATE_SIGNALED_SUCCESS;
+ u32 hfi_err = HFI_ERR_NONE;
+ struct cvp_hfi_msg_session_hdr_ext hdr;
+ dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);
+ if (!inst || !inst->core)
+ ops_tbl = inst->core->dev_ops;
+ ktid = pkt->client_data.kdata;
+ rc = inst->core->synx_ftbl->cvp_synx_ops(inst, CVP_INPUT_SYNX,
+ fc, &synx_state);
+ msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
+ rc = call_hfi_op(ops_tbl, session_send, (void *)inst->session,
+ (struct eva_kmd_hfi_packet *)pkt);
+ dprintk(CVP_ERR, "%s %s: Failed in call_hfi_op %d, %x\n",
+ current->comm, __func__, pkt->size, pkt->packet_type);
+ synx_state = SYNX_STATE_SIGNALED_CANCEL;
+ timeout = msecs_to_jiffies(
+ rc = cvp_wait_process_message(inst, sq, &ktid, timeout,
+ (struct eva_kmd_hfi_packet *)&hdr);
+ hfi_err = hdr.error_type;
+ dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
+ current->comm, __func__, rc);
+ if (hfi_err == HFI_ERR_SESSION_FLUSHED) {
+ dprintk(CVP_SYNX, "%s %s: cvp_wait_process_message flushed\n",
+ current->comm, __func__);
+ } else if (hfi_err == HFI_ERR_SESSION_STREAM_CORRUPT) {
+ dprintk(CVP_INFO, "%s %s: cvp_wait_process_msg non-fatal %d\n",
+ current->comm, __func__, hfi_err);
+ synx_state = SYNX_STATE_SIGNALED_SUCCESS;
+ } else if (hfi_err == HFI_ERR_SESSION_HW_HANG_DETECTED) {
+ dprintk(CVP_ERR, "%s %s: cvp_wait_process_message hfi HW hang err %d\n",
+ device = ops_tbl->hfi_device_data;
+ } else if (hfi_err != HFI_ERR_NONE) {
+ dprintk(CVP_ERR, "%s %s: cvp_wait_process_message hfi err %d\n",
+ rc = inst->core->synx_ftbl->cvp_synx_ops(inst, CVP_OUTPUT_SYNX,
+static int cvp_alloc_fence_data(struct cvp_fence_command **f, u32 size)
+ struct cvp_fence_command *fcmd;
+ int alloc_size = sizeof(struct cvp_hfi_msg_session_hdr_ext);
+ fcmd = kzalloc(sizeof(struct cvp_fence_command), GFP_KERNEL);
+ if (!fcmd)
+ alloc_size = (alloc_size >= size) ? alloc_size : size;
+ fcmd->pkt = kzalloc(alloc_size, GFP_KERNEL);
+ if (!fcmd->pkt) {
+ kfree(fcmd);
+ *f = fcmd;
+static void cvp_free_fence_data(struct cvp_fence_command *f)
+ kfree(f->pkt);
+ f->pkt = NULL;
+ kfree(f);
+ f = NULL;
+static int cvp_fence_thread(void *data)
+ int rc = 0, num_fences;
+ struct cvp_fence_queue *q;
+ enum queue_state state;
+ struct cvp_hfi_cmd_session_hdr *pkt;
+ u32 *synx;
+ u64 ktid = 0;
+ dprintk(CVP_SYNX, "Enter %s\n", current->comm);
+ inst = (struct msm_cvp_inst *)data;
+ if (!inst || !inst->core || !inst->core->dev_ops) {
+ dprintk(CVP_ERR, "%s invalid inst %pK\n", current->comm, inst);
+ q = &inst->fence_cmd_queue;
+wait:
+ dprintk(CVP_SYNX, "%s starts wait\n", current->comm);
+ wait_event_interruptible(q->wq, cvp_fence_wait(q, &f, &state));
+ if (state != QUEUE_START)
+ if (!f) {
+ goto wait;
+ pkt = f->pkt;
+ synx = (u32 *)f->synx;
+ num_fences = f->num_fences - f->output_index;
+ * If there is an output fence, go through the fenced path;
+ * otherwise, go through the non-fenced path.
+ if (num_fences)
+ ktid = pkt->client_data.kdata & (FENCE_BIT - 1);
+ dprintk(CVP_SYNX, "%s pkt type %d on ktid %llu frameID %llu\n",
+ current->comm, pkt->packet_type, ktid, f->frame_id);
+ rc = cvp_fence_proc(inst, f, pkt);
+ mutex_lock(&q->lock);
+ inst->core->synx_ftbl->cvp_release_synx(inst, f);
+ state = q->state;
+ dprintk(CVP_SYNX, "%s done with %d ktid %llu frameID %llu rc %d\n",
+ current->comm, pkt->packet_type, ktid, f->frame_id, rc);
+ cvp_free_fence_data(f);
+ if (rc && state != QUEUE_START)
+ dprintk(CVP_SYNX, "%s exit\n", current->comm);
+static int msm_cvp_session_process_hfi_fence(struct msm_cvp_inst *inst,
+ struct eva_kmd_arg *arg)
+ dprintk(CVP_WARN, "Deprecated IOCTL command %s\n", __func__);
+static int cvp_populate_fences( struct eva_kmd_hfi_packet *in_pkt,
+ unsigned int offset, unsigned int num, struct msm_cvp_inst *inst)
+ u32 i, buf_offset, fence_cnt;
+ struct cvp_hfi_cmd_session_hdr *cmd_hdr;
+ enum op_mode mode;
+ struct cvp_buf_type *buf;
+ bool override;
+ unsigned int total_fence_count = 0;
+ cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
+ if (!offset || !num)
+ if (offset < (sizeof(struct cvp_hfi_cmd_session_hdr)/sizeof(u32))) {
+ dprintk(CVP_ERR, "%s: Incorrect offset in cmd %d\n", __func__, offset);
+ override = get_pkt_fenceoverride((struct cvp_hal_session_cmd_pkt*)in_pkt);
+ dprintk(CVP_SYNX, "%s:Fence Override is %d\n",__func__, override);
+ dprintk(CVP_SYNX, "%s:Kernel Fence is %d\n", __func__, cvp_kernel_fence_enabled);
+ mode = q->mode;
+ if (mode == OP_DRAINING) {
+ dprintk(CVP_SYNX, "%s: flush in progress\n", __func__);
+ rc = cvp_alloc_fence_data((&f), cmd_hdr->size);
+ dprintk(CVP_ERR,"%s: Failed to alloc fence data", __func__);
+ f->type = cmd_hdr->packet_type;
+ f->mode = OP_NORMAL;
+ f->signature = 0xFEEDFACE;
+ f->num_fences = 0;
+ f->output_index = 0;
+ buf_offset = offset;
+ if (cvp_kernel_fence_enabled == 0)
+ goto soc_fence;
+ else if (cvp_kernel_fence_enabled == 1)
+ goto kernel_fence;
+ else if (cvp_kernel_fence_enabled == 2)
+ if (override == true)
+ else if (override == false)
+ dprintk(CVP_ERR, "%s: invalid params", __func__);
+ goto free_exit;
+soc_fence:
+ for (i = 0; i < num; i++) {
+ buf = (struct cvp_buf_type*)&in_pkt->pkt_data[buf_offset];
+ buf_offset += sizeof(*buf) >> 2;
+ if (buf->input_handle || buf->output_handle) {
+ f->num_fences++;
+ if (buf->input_handle)
+ f->output_index++;
+ f->signature = 0xB0BABABE;
+ if (f->num_fences)
+ goto fence_cmd_queue;
+kernel_fence:
+ /* First pass to find INPUT synx handles */
+ buf = (struct cvp_buf_type *)&in_pkt->pkt_data[buf_offset];
+ if (buf->input_handle) {
+ /* Check fence_type? */
+ fences[f->num_fences].h_synx = buf->input_handle;
+ buf->fence_type &= ~INPUT_FENCE_BITMASK;
+ buf->input_handle = 0;
+ total_fence_count++;
+ if (buf->output_handle)
+ if (total_fence_count > MAX_HFI_FENCE_SIZE) {
+ dprintk(CVP_ERR, "Invalid total_fence_count %d\n", total_fence_count);
+ f->output_index = f->num_fences;
+ dprintk(CVP_SYNX, "%s:Input Fence passed - Number of Fences is %d\n",
+ __func__, f->num_fences);
+ * Second pass to find OUTPUT synx handles.
+ * If the number of fences is 0, skip the portion below and return 0.
+ if (buf->output_handle) {
+ fences[f->num_fences].h_synx = buf->output_handle;
+ buf->fence_type &= ~OUTPUT_FENCE_BITMASK;
+ buf->output_handle = 0;
+ dprintk(CVP_SYNX, "%s:Output Fence passed - Number of Fences is %d\n",
+ if (f->num_fences == 0)
+ rc = inst->core->synx_ftbl->cvp_import_synx(inst, f, (u32*)fences);
+ dprintk(CVP_ERR,"%s: Failed to import fences", __func__);
+fence_cmd_queue:
+ fence_cnt = f->num_fences;
+ memcpy(f->pkt, cmd_hdr, cmd_hdr->size);
+ f->pkt->client_data.kdata |= FENCE_BIT;
+ list_add_tail(&f->list, &inst->fence_cmd_queue.wait_list);
+ wake_up(&inst->fence_cmd_queue.wq);
+ return fence_cnt;
+free_exit:
+ int pkt_type, rc = 0;
+ enum buf_map_type map_type;
+ pkt_type = in_pkt->pkt_data[1];
+ map_type = cvp_find_map_type(pkt_type);
+ /* The kdata will be overridden by the transaction ID if the cmd has a buf */
+ cmd_hdr->client_data.kdata = 0;
+ __func__, cmd_hdr->packet_type,
+ cmd_hdr->session_id,
+ cmd_hdr->client_data.transaction_id,
+ cmd_hdr->client_data.kdata & (FENCE_BIT - 1));
+ if (map_type == MAP_PERSIST)
+ rc = msm_cvp_map_user_persist(inst, in_pkt, in_offset, in_buf_num);
+ else if (map_type == UNMAP_PERSIST)
+ rc = msm_cvp_unmap_user_persist(inst, in_pkt, in_offset, in_buf_num);
+ rc = msm_cvp_map_frame(inst, in_pkt, in_offset, in_buf_num);
+ rc = cvp_populate_fences(in_pkt, in_offset, in_buf_num, inst);
+ if (rc == 0) {
+ in_pkt);
+ dprintk(CVP_ERR,"%s: Failed in call_hfi_op %d, %x\n",
+ __func__, in_pkt->pkt_data[0],
+ if (map_type == MAP_FRAME)
+ msm_cvp_unmap_frame(inst,
+ cmd_hdr->client_data.kdata);
+ } else if (rc > 0) {
+ dprintk(CVP_SYNX, "Going fenced path\n");
+ dprintk(CVP_ERR,"%s: Failed to populate fences\n",
+ msm_cvp_unmap_frame(inst, cmd_hdr->client_data.kdata);
+static inline int div_by_1dot5(unsigned int a)
+ unsigned long i = a << 1;
+ return (unsigned int) i/3;
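+/*
+ * Worked example (illustrative): div_by_1dot5() computes a / 1.5 in integer
+ * arithmetic as (a * 2) / 3, truncating rather than rounding:
+ *
+ *	div_by_1dot5(300)  == 200
+ *	div_by_1dot5(1000) == 666
+ *
+ * It is used in msm_cvp_set_sysprop() below when storing the ICA cycle
+ * properties.
+ */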
+int msm_cvp_session_delete(struct msm_cvp_inst *inst)
+int msm_cvp_session_create(struct msm_cvp_inst *inst)
+ int rc = 0, rc1 = 0;
+ if (inst->state >= MSM_CVP_CLOSE_DONE)
+ if (inst->state != MSM_CVP_CORE_INIT_DONE ||
+ inst->state > MSM_CVP_OPEN_DONE) {
+ "%s Incorrect CVP state %d to create session\n",
+ __func__, inst->state);
+ rc = msm_cvp_comm_try_state(inst, MSM_CVP_OPEN_DONE);
+ "Failed to move instance to open done state\n");
+ goto fail_create;
+ rc = cvp_comm_set_arp_buffers(inst);
+ "Failed to set ARP buffers\n");
+ goto fail_init;
+ inst->core->synx_ftbl->cvp_sess_init_synx(inst);
+ sq->state = QUEUE_ACTIVE;
+fail_init:
+ rc1 = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
+ if (rc1)
+ dprintk(CVP_ERR, "%s: close failed\n", __func__);
+fail_create:
+static int session_state_check_init(struct msm_cvp_inst *inst)
+ mutex_lock(&inst->lock);
+ if (inst->state == MSM_CVP_OPEN || inst->state == MSM_CVP_OPEN_DONE) {
+ mutex_unlock(&inst->lock);
+ return msm_cvp_session_create(inst);
+static int cvp_fence_thread_start(struct msm_cvp_inst *inst)
+ u32 tnum = 0;
+ char tname[16];
+ struct task_struct *thread;
+ if (!inst->prop.fthread_nr)
+ q->state = QUEUE_START;
+ for (i = 0; i < inst->prop.fthread_nr; ++i) {
+ if (!cvp_get_inst_validate(inst->core, inst)) {
+ snprintf(tname, sizeof(tname), "fthread_%d", tnum++);
+ thread = kthread_run(cvp_fence_thread, inst, tname);
+ if (!thread) {
+ dprintk(CVP_ERR, "%s create %s fail", __func__, tname);
+ rc = -ECHILD;
+ sq->state = QUEUE_START;
+ q->state = QUEUE_STOP;
+ wake_up_all(&q->wq);
+static int cvp_fence_thread_stop(struct msm_cvp_inst *inst)
+ sq->state = QUEUE_STOP;
+int msm_cvp_session_start(struct msm_cvp_inst *inst,
+ enum queue_state old_state;
+ if (!inst || !inst->core) {
+ if (sq->msg_count) {
+ dprintk(CVP_ERR, "session start failed queue not empty%d\n",
+ sq->msg_count);
+ old_state = sq->state;
+ if (inst->prop.type == HFI_SESSION_FD
+ || inst->prop.type == HFI_SESSION_DMM) {
+ spin_lock(&inst->core->resources.pm_qos.lock);
+ inst->core->resources.pm_qos.off_vote_cnt++;
+ spin_unlock(&inst->core->resources.pm_qos.lock);
+ call_hfi_op(ops_tbl, pm_qos_update, ops_tbl->hfi_device_data);
+ * cvp_fence_thread_start will take a reference to the instance,
+ * which guarantees the EVA session won't be deleted. Use of session
+ * functions, such as session_start, requires the session to be valid.
+ rc = cvp_fence_thread_start(inst);
+ goto restore_state;
+ /* Send SESSION_START command */
+ rc = call_hfi_op(ops_tbl, session_start, (void *)inst->session);
+ dprintk(CVP_WARN, "%s: session start failed rc %d\n",
+ goto stop_thread;
+ /* Wait for FW response */
+ rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_START_DONE);
+ dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
+ pr_info_ratelimited(CVP_DBG_TAG "session %llx (%#x) started\n",
+ "sess", inst, hash32_ptr(inst->session));
+stop_thread:
+ cvp_fence_thread_stop(inst);
+restore_state:
+ sq->state = old_state;
+int msm_cvp_session_stop(struct msm_cvp_inst *inst,
+ struct eva_kmd_session_control *sc = NULL;
+ int curr_sq_state = -1;
+ if (arg)
+ sc = &arg->data.session_ctrl;
+ curr_sq_state = sq->state;
+ if (sq->state != QUEUE_START) {
+ "%s: Stop not allowed - curr state %d, inst %llx, sess %llx, %s type %d\n",
+ __func__, sq->state, inst, inst->session, inst->proc_name,
+ inst->session_type);
+ if (sq->state == QUEUE_STOP) {
+ "%s: Double stop session - inst %llx, sess %llx, %s of type %d\n",
+ __func__, inst, inst->session, inst->proc_name, inst->session_type);
+ dprintk(CVP_ERR, "session stop incorrect: queue not empty%d\n",
+ if (sc)
+ sc->ctrl_data[0] = sq->msg_count;
+ rc = -EUCLEAN;
+ pr_info_ratelimited(CVP_DBG_TAG "Stop session: %pK session_id = %#x\n",
+ /* Send SESSION_STOP command */
+ rc = call_hfi_op(ops_tbl, session_stop, (void *)inst->session);
+ dprintk(CVP_WARN, "%s: session stop failed rc %d\n",
+ sq->state = curr_sq_state;
+ rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_STOP_DONE);
+ wake_up_all(&inst->session_queue.wq);
+int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst)
+ dprintk(CVP_SESS, "Stop session queue: %pK session_id = %#x\n",
+ inst, hash32_ptr(inst->session));
+ return cvp_fence_thread_stop(inst);
+static int msm_cvp_session_ctrl(struct msm_cvp_inst *inst,
+ struct eva_kmd_session_control *ctrl = &arg->data.session_ctrl;
+ unsigned int ctrl_type;
+ ctrl_type = ctrl->ctrl_type;
+ if (!inst && ctrl_type != SESSION_CREATE) {
+ switch (ctrl_type) {
+ case SESSION_STOP:
+ rc = msm_cvp_session_stop(inst, arg);
+ case SESSION_START:
+ rc = msm_cvp_session_start(inst, arg);
+ case SESSION_CREATE:
+ case SESSION_DELETE:
+ rc = msm_cvp_session_delete(inst);
+ case SESSION_INFO:
+ dprintk(CVP_ERR, "%s Unsupported session ctrl%d\n",
+ __func__, ctrl->ctrl_type);
+static int msm_cvp_get_sysprop(struct msm_cvp_inst *inst,
+ struct eva_kmd_sys_properties *props = &arg->data.sys_properties;
+ hfi = ops_tbl->hfi_device_data;
+ if (props->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
+ dprintk(CVP_ERR, "Too many properties %d to get\n",
+ props->prop_num);
+ for (i = 0; i < props->prop_num; i++) {
+ switch (props->prop_data[i].prop_type) {
+ case EVA_KMD_PROP_HFI_VERSION:
+ props->prop_data[i].data = hfi->version;
+ case EVA_KMD_PROP_SESSION_DUMPOFFSET:
+ props->prop_data[i].data =
+ session_prop->dump_offset;
+ case EVA_KMD_PROP_SESSION_DUMPSIZE:
+ session_prop->dump_size;
+ case EVA_KMD_PROP_SESSION_ERROR:
+ get_dma_buf(hfi->sfr.mem_data.dma_buf);
+ rc = dma_buf_fd(hfi->sfr.mem_data.dma_buf, O_RDONLY | O_CLOEXEC);
+ dprintk(CVP_WARN, "Failed get dma_buf fd %d\n", rc);
+ dma_buf_put(hfi->sfr.mem_data.dma_buf);
+ props->prop_data[i].data = rc;
+ case EVA_KMD_PROP_PWR_FDU:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_FDU);
+ case EVA_KMD_PROP_PWR_ICA:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_ICA);
+ case EVA_KMD_PROP_PWR_OD:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_OD);
+ case EVA_KMD_PROP_PWR_MPU:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_MPU);
+ case EVA_KMD_PROP_PWR_VADL:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_VADL);
+ case EVA_KMD_PROP_PWR_TOF:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_TOF);
+ case EVA_KMD_PROP_PWR_RGE:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_RGE);
+ case EVA_KMD_PROP_PWR_XRA:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_XRA);
+ case EVA_KMD_PROP_PWR_LSR:
+ msm_cvp_get_hw_aggregate_cycles(HFI_HW_LSR);
+ dprintk(CVP_ERR, "unrecognized sys property %d\n",
+ props->prop_data[i].prop_type);
+ rc = -EFAULT;
+static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
+ struct eva_kmd_sys_property *prop_array;
+ dprintk(CVP_ERR, "Too many properties %d to set\n",
+ prop_array = &arg->data.sys_properties.prop_data[0];
+ switch (prop_array[i].prop_type) {
+ case EVA_KMD_PROP_SESSION_TYPE:
+ session_prop->type = prop_array[i].data;
+ case EVA_KMD_PROP_SESSION_KERNELMASK:
+ session_prop->kernel_mask = prop_array[i].data;
+ case EVA_KMD_PROP_SESSION_PRIORITY:
+ session_prop->priority = prop_array[i].data;
+ case EVA_KMD_PROP_SESSION_SECURITY:
+ session_prop->is_secure = prop_array[i].data;
+ case EVA_KMD_PROP_SESSION_DSPMASK:
+ session_prop->dsp_mask = prop_array[i].data;
+ session_prop->cycles[HFI_HW_FDU] = prop_array[i].data;
+ session_prop->cycles[HFI_HW_ICA] =
+ div_by_1dot5(prop_array[i].data);
+ session_prop->cycles[HFI_HW_OD] = prop_array[i].data;
+ session_prop->cycles[HFI_HW_MPU] = prop_array[i].data;
+ session_prop->cycles[HFI_HW_VADL] = prop_array[i].data;
+ session_prop->cycles[HFI_HW_TOF] = prop_array[i].data;
+ session_prop->cycles[HFI_HW_RGE] = prop_array[i].data;
+ session_prop->cycles[HFI_HW_XRA] = prop_array[i].data;
+ session_prop->cycles[HFI_HW_LSR] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FW:
+ session_prop->fw_cycles =
+ case EVA_KMD_PROP_PWR_DDR:
+ session_prop->ddr_bw = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_SYSCACHE:
+ session_prop->ddr_cache = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FDU_OP:
+ session_prop->op_cycles[HFI_HW_FDU] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_ICA_OP:
+ session_prop->op_cycles[HFI_HW_ICA] =
+ case EVA_KMD_PROP_PWR_OD_OP:
+ session_prop->op_cycles[HFI_HW_OD] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_MPU_OP:
+ session_prop->op_cycles[HFI_HW_MPU] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_VADL_OP:
+ session_prop->op_cycles[HFI_HW_VADL] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_TOF_OP:
+ session_prop->op_cycles[HFI_HW_TOF] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_RGE_OP:
+ session_prop->op_cycles[HFI_HW_RGE] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_XRA_OP:
+ session_prop->op_cycles[HFI_HW_XRA] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_LSR_OP:
+ session_prop->op_cycles[HFI_HW_LSR] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FW_OP:
+ session_prop->fw_op_cycles =
+ case EVA_KMD_PROP_PWR_DDR_OP:
+ session_prop->ddr_op_bw = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_SYSCACHE_OP:
+ session_prop->ddr_op_cache = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_FDU:
+ session_prop->fps[HFI_HW_FDU] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_MPU:
+ session_prop->fps[HFI_HW_MPU] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_OD:
+ session_prop->fps[HFI_HW_OD] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_ICA:
+ session_prop->fps[HFI_HW_ICA] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_VADL:
+ session_prop->fps[HFI_HW_VADL] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_TOF:
+ session_prop->fps[HFI_HW_TOF] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_RGE:
+ session_prop->fps[HFI_HW_RGE] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_XRA:
+ session_prop->fps[HFI_HW_XRA] = prop_array[i].data;
+ case EVA_KMD_PROP_PWR_FPS_LSR:
+ session_prop->fps[HFI_HW_LSR] = prop_array[i].data;
+ session_prop->dump_offset = prop_array[i].data;
+ session_prop->dump_size = prop_array[i].data;
+ "unrecognized sys property to set %d\n",
+ prop_array[i].prop_type);
+static int cvp_drain_fence_sched_list(struct msm_cvp_inst *inst)
+ int count = 0, max_count = 0;
+ if (list_empty(&q->sched_list))
+ list_for_each_entry(f, &q->sched_list, list) {
+ ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
+ dprintk(CVP_SYNX, "%s: frame %llu %llu is in sched_list\n",
+ __func__, ktid, f->frame_id);
+ ++count;
+ wait_time = count * 1000;
+ wait_time *= inst->core->resources.msm_cvp_hw_rsp_timeout;
+ dprintk(CVP_SYNX, "%s: wait %d us for %d fence command\n",
+ __func__, wait_time, count);
+ max_count = wait_time / 100;
+ if (list_empty(&q->sched_list)) {
+ if (count < max_count) {
+ dprintk(CVP_ERR, "%s: timed out!\n", __func__);
+static void cvp_clean_fence_queue(struct msm_cvp_inst *inst, int synx_state)
+ struct cvp_fence_command *f, *d;
+ q->mode = OP_DRAINING;
+ if (list_empty(&q->wait_list))
+ goto check_sched;
+ list_for_each_entry_safe(f, d, &q->wait_list, list) {
+ dprintk(CVP_SYNX, "%s: (%#x) flush frame %llu %llu wait_list\n",
+ __func__, hash32_ptr(inst->session), ktid, f->frame_id);
+ msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
+ inst->core->synx_ftbl->cvp_cancel_synx(inst, CVP_OUTPUT_SYNX,
+ f, synx_state);
+check_sched:
+ dprintk(CVP_SYNX, "%s: (%#x)flush frame %llu %llu sched_list\n",
+ inst->core->synx_ftbl->cvp_cancel_synx(inst, CVP_INPUT_SYNX,
+int cvp_clean_session_queues(struct msm_cvp_inst *inst)
+ u32 count = 0, max_retries = 100;
+ if (q->state == QUEUE_START || q->state == QUEUE_ACTIVE) {
+ cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);
+ dprintk(CVP_WARN, "Incorrect fence cmd queue state %d\n",
+ q->state);
+ /* Waiting for all output synx sent */
+ if (++count > max_retries)
+ return -EBUSY;
+static int cvp_flush_all(struct msm_cvp_inst *inst)
+ dprintk(CVP_SESS, "session %llx (%#x)flush all starts\n",
+ dprintk(CVP_SESS, "%s: (%#x) send flush to fw\n",
+ __func__, hash32_ptr(inst->session));
+ /* Send flush to FW */
+ rc = call_hfi_op(ops_tbl, session_flush, (void *)inst->session);
+ dprintk(CVP_WARN, "%s: continue flush without fw. rc %d\n",
+ rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_FLUSH_DONE);
+ dprintk(CVP_SESS, "%s: (%#x) received flush from fw\n",
+ rc = cvp_drain_fence_sched_list(inst);
+ q->mode = OP_NORMAL;
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg)
+ if (!inst || !arg) {
+ dprintk(CVP_ERR, "%s: invalid args\n", __func__);
+ dprintk(CVP_HFI, "%s: arg->type = %x", __func__, arg->type);
+ if (arg->type != EVA_KMD_SESSION_CONTROL &&
+ arg->type != EVA_KMD_SET_SYS_PROPERTY &&
+ arg->type != EVA_KMD_GET_SYS_PROPERTY) {
+ rc = session_state_check_init(inst);
+ "Incorrect session state %d for command %#x",
+ inst->state, arg->type);
+ switch (arg->type) {
+ case EVA_KMD_GET_SESSION_INFO:
+ struct eva_kmd_session_info *session =
+ (struct eva_kmd_session_info *)&arg->data.session;
+ rc = msm_cvp_get_session_info(inst, &session->session_id);
+ case EVA_KMD_UPDATE_POWER:
+ rc = msm_cvp_update_power(inst);
+ case EVA_KMD_REGISTER_BUFFER:
+ struct eva_kmd_buffer *buf =
+ (struct eva_kmd_buffer *)&arg->data.regbuf;
+ rc = msm_cvp_register_buffer(inst, buf);
+ case EVA_KMD_UNREGISTER_BUFFER:
+ (struct eva_kmd_buffer *)&arg->data.unregbuf;
+ rc = msm_cvp_unregister_buffer(inst, buf);
+ case EVA_KMD_RECEIVE_MSG_PKT:
+ struct eva_kmd_hfi_packet *out_pkt =
+ (struct eva_kmd_hfi_packet *)&arg->data.hfi_pkt;
+ rc = msm_cvp_session_receive_hfi(inst, out_pkt);
+ case EVA_KMD_SEND_CMD_PKT:
+ struct eva_kmd_hfi_packet *in_pkt =
+ rc = msm_cvp_session_process_hfi(inst, in_pkt,
+ arg->buf_offset, arg->buf_num);
+ case EVA_KMD_SEND_FENCE_CMD_PKT:
+ rc = msm_cvp_session_process_hfi_fence(inst, arg);
+ case EVA_KMD_SESSION_CONTROL:
+ rc = msm_cvp_session_ctrl(inst, arg);
+ case EVA_KMD_GET_SYS_PROPERTY:
+ rc = msm_cvp_get_sysprop(inst, arg);
+ case EVA_KMD_SET_SYS_PROPERTY:
+ rc = msm_cvp_set_sysprop(inst, arg);
+ case EVA_KMD_FLUSH_ALL:
+ rc = cvp_flush_all(inst);
+ case EVA_KMD_FLUSH_FRAME:
+ dprintk(CVP_WARN, "EVA_KMD_FLUSH_FRAME IOCTL deprecated\n");
+ dprintk(CVP_HFI, "%s: unknown arg type %#x\n",
+ __func__, arg->type);
+int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
+ struct cvp_hal_session *session;
+ dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
+ session = (struct cvp_hal_session *)inst->session;
+ if (!session || session == (void *)0xdeadbeef)
+ rc = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
+ rc = msm_cvp_session_deinit_buffers(inst);
+int msm_cvp_session_init(struct msm_cvp_inst *inst)
+ /* set default frequency */
+ inst->clk_data.min_freq = 1000;
+ inst->clk_data.ddr_bw = 1000;
+ inst->clk_data.sys_cache_bw = 1000;
+ inst->prop.type = 1;
+ inst->prop.kernel_mask = 0xFFFFFFFF;
+ inst->prop.priority = 0;
+ inst->prop.is_secure = 0;
+ inst->prop.dsp_mask = 0;
+ inst->prop.fthread_nr = 3;
@@ -0,0 +1,49 @@
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+#ifndef _MSM_CVP_H_
+#define _MSM_CVP_H_
+#include "eva_shared_def.h"
+static inline bool is_buf_param_valid(u32 buf_num, u32 offset)
+ int max_buf_num;
+ max_buf_num = sizeof(struct eva_kmd_hfi_packet) /
+ sizeof(struct cvp_buf_type);
+ if (buf_num > max_buf_num)
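+ /* Reject values that would overflow the u32 byte-size computation below. */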
+ if ((offset > U32_MAX/sizeof(u32)) ||
+ (offset*sizeof(u32) > U32_MAX - buf_num * sizeof(struct cvp_buf_type)))
+ if ((offset * sizeof(u32) + buf_num * sizeof(struct cvp_buf_type)) >
+ sizeof(struct eva_kmd_hfi_packet))
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg);
+int msm_cvp_session_init(struct msm_cvp_inst *inst);
+int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
+int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst);
+int msm_cvp_session_create(struct msm_cvp_inst *inst);
+int msm_cvp_session_delete(struct msm_cvp_inst *inst);
+int msm_cvp_session_start(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg);
+int msm_cvp_session_stop(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg);
+int msm_cvp_get_session_info(struct msm_cvp_inst *inst, u32 *session);
+int cvp_clean_session_queues(struct msm_cvp_inst *inst);
@@ -0,0 +1,2480 @@
+#include <linux/pid.h>
+#include <linux/fdtable.h>
+#include <linux/rcupdate.h>
+#include <linux/fs.h>
+#include <linux/sched/task.h>
+#define eva_buf_map dma_buf_map
+#define _buf_map_set_vaddr dma_buf_map_set_vaddr
+#define eva_buf_map iosys_map
+#define _buf_map_set_vaddr iosys_map_set_vaddr
+#define CLEAR_USE_BITMAP(idx, inst) \
+ do { \
+ clear_bit(idx, &inst->dma_cache.usage_bitmap); \
+ dprintk(CVP_MEM, "clear %x bit %d dma_cache bitmap 0x%llx\n", \
+ hash32_ptr(inst->session), smem->bitmap_index, \
+ inst->dma_cache.usage_bitmap); \
+ } while (0)
+#define SET_USE_BITMAP(idx, inst) \
+ set_bit(idx, &inst->dma_cache.usage_bitmap); \
+ dprintk(CVP_MEM, "Set %x bit %d dma_cache bitmap 0x%llx\n", \
+ hash32_ptr(inst->session), idx, \
+struct cvp_oob_pool wncc_buf_pool;
+static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst* inst);
+static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
+ struct eva_kmd_oob_wncc *wncc_oob,
+ struct eva_kmd_wncc_metadata** wncc_metadata);
+void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log);
+int print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
+ struct msm_cvp_smem *smem)
+ char name[PKT_NAME_LEN] = "Unknown";
+ if (!(tag & msm_cvp_debug))
+ dprintk(CVP_ERR, "Invalid inst 0x%llx or smem 0x%llx\n",
+ inst, smem);
+ if (smem->dma_buf) {
+ if (i > 0)
+ strlcpy(name, cvp_hfi_defs[i].name, PKT_NAME_LEN);
+ if (!atomic_read(&smem->refcount))
+ dprintk(tag,
+ " UNUSED mapping %s: 0x%llx size %d iova %#x idx %d pkt_type %s buf_idx %#x fd %d\n",
+ str, smem->dma_buf,
+ smem->size, smem->device_addr, smem->bitmap_index, name, smem->buf_idx, smem->fd);
+ "%s: %x : 0x%llx size %d flags %#x iova %#x idx %d ref %d pkt_type %s buf_idx %#x fd %d\n",
+ str, hash32_ptr(inst->session), smem->dma_buf,
+ smem->size, smem->flags, smem->device_addr,
+ smem->bitmap_index, atomic_read(&smem->refcount),
+ name, smem->buf_idx, smem->fd);
+static void print_internal_buffer(u32 tag, const char *str,
+ struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf)
+ if (!(tag & msm_cvp_debug) || !inst || !cbuf)
+ if (cbuf->smem->dma_buf) {
+ "%s: %x : fd %d off %d 0x%llx %s size %d iova %#x\n",
+ str, hash32_ptr(inst->session), cbuf->fd,
+ cbuf->offset, cbuf->smem->dma_buf, cbuf->smem->dma_buf->name,
+ cbuf->size, cbuf->smem->device_addr);
+ "%s: %x : idx %2d fd %d off %d size %d iova %#x\n",
+ str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
+ cbuf->offset, cbuf->size, cbuf->smem->device_addr);
+void print_cvp_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
+ struct cvp_internal_buf *cbuf)
+ if (!inst || !cbuf) {
+ "%s Invalid params inst %pK, cbuf %pK\n",
+ str, inst, cbuf);
+ print_smem(tag, str, inst, cbuf->smem);
+static void _log_smem(struct inst_snapshot *snapshot, struct msm_cvp_inst *inst,
+ struct msm_cvp_smem *smem, bool logging)
+ if (print_smem(CVP_ERR, "bufdump", inst, smem))
+ if (!logging || !snapshot)
+ if (snapshot && snapshot->smem_index < MAX_ENTRIES) {
+ struct smem_data *s;
+ s = &snapshot->smem_log[snapshot->smem_index];
+ snapshot->smem_index++;
+ s->size = smem->size;
+ s->flags = smem->flags;
+ s->device_addr = smem->device_addr;
+ s->bitmap_index = smem->bitmap_index;
+ s->refcount = atomic_read(&smem->refcount);
+ s->pkt_type = smem->pkt_type;
+ s->buf_idx = smem->buf_idx;
+static void _log_buf(struct inst_snapshot *snapshot, enum smem_prop prop,
+ struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf,
+ bool logging)
+ struct cvp_buf_data *buf = NULL;
+ print_cvp_buffer(CVP_ERR, "bufdump", inst, cbuf);
+ if (!logging)
+ if (snapshot) {
+ if (prop == SMEM_CDSP && snapshot->dsp_index < MAX_ENTRIES) {
+ index = snapshot->dsp_index;
+ buf = &snapshot->dsp_buf_log[index];
+ snapshot->dsp_index++;
+ } else if (prop == SMEM_PERSIST &&
+ snapshot->persist_index < MAX_ENTRIES) {
+ index = snapshot->persist_index;
+ buf = &snapshot->persist_buf_log[index];
+ snapshot->persist_index++;
+ if (buf) {
+ buf->device_addr = cbuf->smem->device_addr;
+ buf->size = cbuf->size;
+void print_client_buffer(u32 tag, const char *str,
+ struct msm_cvp_inst *inst, struct eva_kmd_buffer *cbuf)
+ if (!(tag & msm_cvp_debug) || !str || !inst || !cbuf)
+ "%s: %x : idx %2d fd %d off %d size %d type %d flags 0x%x reserved[0] %u\n",
+ cbuf->offset, cbuf->size, cbuf->type, cbuf->flags,
+ cbuf->reserved[0]);
+static bool __is_buf_valid(struct msm_cvp_inst *inst,
+ struct eva_kmd_buffer *buf)
+ struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+ if (!inst || !inst->core || !buf) {
+ if (buf->fd < 0) {
+ dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
+ if (buf->offset) {
+ "%s: offset is deprecated, set to 0.\n",
+ mutex_lock(&inst->cvpdspbufs.lock);
+ list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
+ if (cbuf->fd == buf->fd) {
+ if (cbuf->size != buf->size) {
+ dprintk(CVP_ERR, "%s: buf size mismatch\n",
+ mutex_unlock(&inst->cvpdspbufs.lock);
+ if (found) {
+ print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
+static struct file *msm_cvp_fget(unsigned int fd, struct task_struct *task,
+ fmode_t mask, unsigned int refs)
+ struct files_struct *files = task->files;
+ struct file *file;
+ if (!files)
+ rcu_read_lock();
+loop:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0))
+ file = fcheck_files(files, fd);
+ file = files_lookup_fd_rcu(files, fd);
+ if (file) {
+ /* File object ref couldn't be taken.
+ * dup2() atomicity guarantee is the reason
+ * we loop to catch the new file (or NULL pointer).
+ if (file->f_mode & mask)
+ file = NULL;
+ else if (!get_file_rcu(file))
+ goto loop;
+ rcu_read_unlock();
+ return file;
+static struct dma_buf *cvp_dma_buf_get(struct file *file, int fd,
+ struct task_struct *task)
+ if (file->f_op != gfa_cv.dmabuf_f_op) {
+ dprintk(CVP_WARN, "fd doesn't refer to dma_buf\n");
+ return ERR_PTR(-EINVAL);
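+ /* For dma-buf files, the dma_buf object is stored in file->private_data. */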
+ return file->private_data;
+int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
+ struct cvp_internal_buf *cbuf = NULL;
+ struct msm_cvp_smem *smem = NULL;
+ struct dma_buf *dma_buf = NULL;
+ if (!__is_buf_valid(inst, buf))
+ if (!inst->task)
+ file = msm_cvp_fget(buf->fd, inst->task, FMODE_PATH, 1);
+ if (file == NULL) {
+ dprintk(CVP_WARN, "%s fail to get file from fd %d %s\n", __func__, buf->fd, inst->proc_name);
+ dma_buf = cvp_dma_buf_get(
+ file,
+ buf->fd,
+ inst->task);
+ if (dma_buf == ERR_PTR(-EINVAL)) {
+ if (dma_buf->size < buf->size) {
+ dprintk(CVP_ERR, "%s DSP client buffer too large %d > %d\n",
+ __func__, buf->size, dma_buf->size);
+ dprintk(CVP_MEM, "dma_buf from internal %llu\n", dma_buf);
+ cbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+ if (!cbuf) {
+ smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+ smem->dma_buf = dma_buf;
+ smem->bitmap_index = MAX_DMABUF_NUMS;
+ smem->pkt_type = 0;
+ smem->buf_idx = 0;
+ smem->fd = buf->fd;
+ dprintk(CVP_MEM, "%s: dma_buf = %llx\n", __func__, dma_buf);
+ rc = msm_cvp_map_smem(inst, smem, "map dsp");
+ print_client_buffer(CVP_ERR, "map failed", inst, buf);
+ atomic_inc(&smem->refcount);
+ cbuf->smem = smem;
+ cbuf->fd = buf->fd;
+ cbuf->size = buf->size;
+ cbuf->offset = buf->offset;
+ cbuf->ownership = CLIENT;
+ cbuf->index = buf->index;
+ buf->reserved[0] = (uint32_t)smem->device_addr;
+ list_add_tail(&cbuf->list, &inst->cvpdspbufs.list);
+ fput(file);
+ if (smem) {
+ if (smem->device_addr)
+ msm_cvp_unmap_smem(inst, smem, "unmap dsp");
+ msm_cvp_smem_put_dma_buf(smem->dma_buf);
+ cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
+ if (cbuf)
+ cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
+int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
+ bool found;
+ found = false;
+ print_client_buffer(CVP_ERR, "invalid", inst, buf);
+ if (cbuf->smem->device_addr) {
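+ /* Keep a copy in the unused_dsp_bufs ring so recently unmapped DSP buffers can still be dumped for debug. */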
+ u64 idx = inst->unused_dsp_bufs.ktid;
+ inst->unused_dsp_bufs.smem[idx] = *(cbuf->smem);
+ inst->unused_dsp_bufs.nr++;
+ inst->unused_dsp_bufs.nr =
+ (inst->unused_dsp_bufs.nr > MAX_FRAME_BUFFER_NUMS)?
+ MAX_FRAME_BUFFER_NUMS : inst->unused_dsp_bufs.nr;
+ inst->unused_dsp_bufs.ktid = ++idx % MAX_FRAME_BUFFER_NUMS;
+ msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
+ msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
+ atomic_dec(&cbuf->smem->refcount);
+ list_del(&cbuf->list);
+ cvp_kmem_cache_free(&cvp_driver->smem_cache, cbuf->smem);
+int msm_cvp_map_buf_wncc(struct msm_cvp_inst *inst,
+ struct cvp_internal_buf* cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+ struct msm_cvp_smem* smem = NULL;
+ struct dma_buf* dma_buf = NULL;
+ if (!inst->session) {
+ dprintk(CVP_ERR, "%s: invalid session", __func__);
+ if (buf->index) {
+ dprintk(CVP_ERR, "%s: buf index is NOT 0 fd=%d",
+ __func__, buf->fd);
+ dprintk(CVP_ERR, "%s: invalid fd = %d", __func__, buf->fd);
+ dprintk(CVP_ERR, "%s: offset is not supported, set to 0.",
+ mutex_lock(&inst->cvpwnccbufs.lock);
+ list_for_each_entry(cbuf, &inst->cvpwnccbufs.list, list) {
+ dprintk(CVP_ERR, "%s: buf size mismatch",
+ mutex_unlock(&inst->cvpwnccbufs.lock);
+ dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+ msm_cvp_smem_put_dma_buf(dma_buf);
+ dprintk(CVP_MEM, "%s: dma_buf = %llx", __func__, dma_buf);
+ rc = msm_cvp_map_smem(inst, smem, "map wncc");
+ dprintk(CVP_ERR, "%s: map failed", __func__);
+ print_client_buffer(CVP_ERR, __func__, inst, buf);
+ /* Added for PreSil/RUMI testing */
+#ifdef USE_PRESIL
+ dprintk(CVP_DBG,
+ "wncc buffer is %x for cam_presil_send_buffer"
+ " with MAP_ADDR_OFFSET %x",
+ (u64)(smem->device_addr) - MAP_ADDR_OFFSET, MAP_ADDR_OFFSET);
+ cam_presil_send_buffer((u64)smem->dma_buf, 0,
+ (u32)cbuf->offset, (u32)cbuf->size,
+ (u64)(smem->device_addr) - MAP_ADDR_OFFSET);
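+ /* Lazily allocate the wncc buffer look-up table on first registration. */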
+ if (inst->cvpwnccbufs_table == NULL) {
+ inst->cvpwnccbufs_table =
+ (struct msm_cvp_wncc_buffer*) kzalloc(
+ sizeof(struct msm_cvp_wncc_buffer) *
+ EVA_KMD_WNCC_MAX_SRC_BUFS,
+ if (!inst->cvpwnccbufs_table) {
+ for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS; i++)
+ if (inst->cvpwnccbufs_table[i].iova == 0)
+ list_add_tail(&cbuf->list, &inst->cvpwnccbufs.list);
+ inst->cvpwnccbufs_num++;
+ inst->cvpwnccbufs_table[i].fd = buf->fd;
+ inst->cvpwnccbufs_table[i].iova = smem->device_addr;
+ inst->cvpwnccbufs_table[i].size = smem->size;
+ /* buf reserved[0] used to store wncc src buf id */
+ buf->reserved[0] = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
+ /* cbuf ktid used to store wncc src buf id */
+ cbuf->ktid = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
+ dprintk(CVP_MEM, "%s: wncc buf iova: 0x%08X",
+ __func__, inst->cvpwnccbufs_table[i].iova);
+ if (i == EVA_KMD_WNCC_MAX_SRC_BUFS) {
+ "%s: wncc buf table full - max (%u) already registered",
+ __func__, EVA_KMD_WNCC_MAX_SRC_BUFS);
+ /* _wncc_print_cvpwnccbufs_table(inst); */
+ rc = -EDQUOT;
+ msm_cvp_unmap_smem(inst, smem, "unmap wncc");
+ cbuf = NULL;
+ smem = NULL;
+int msm_cvp_unmap_buf_wncc(struct msm_cvp_inst *inst,
+ uint32_t buf_id, buf_idx;
+ buf_id = buf->reserved[0];
+ if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET || buf_id >=
+ (EVA_KMD_WNCC_MAX_SRC_BUFS + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET)) {
+ dprintk(CVP_ERR, "%s: invalid buffer id %d",
+ __func__, buf->reserved[0]);
+ if (inst->cvpwnccbufs_num == 0) {
+ dprintk(CVP_ERR, "%s: no wncc buffers currently mapped", __func__);
+ buf_idx = buf_id - EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
+ if (inst->cvpwnccbufs_table[buf_idx].iova == 0) {
+ dprintk(CVP_ERR, "%s: buffer id %d not found",
+ __func__, buf_id);
+ buf->fd = inst->cvpwnccbufs_table[buf_idx].fd;
+ _wncc_print_cvpwnccbufs_table(inst);
+ u64 idx = inst->unused_wncc_bufs.ktid;
+ inst->unused_wncc_bufs.smem[idx] = *(cbuf->smem);
+ inst->unused_wncc_bufs.nr++;
+ inst->unused_wncc_bufs.nr =
+ (inst->unused_wncc_bufs.nr > NUM_WNCC_BUFS)?
+ NUM_WNCC_BUFS : inst->unused_wncc_bufs.nr;
+ inst->unused_wncc_bufs.ktid = ++idx % NUM_WNCC_BUFS;
+ msm_cvp_unmap_smem(inst, cbuf->smem, "unmap wncc");
+ inst->cvpwnccbufs_table[buf_idx].fd = 0;
+ inst->cvpwnccbufs_table[buf_idx].iova = 0;
+ inst->cvpwnccbufs_table[buf_idx].size = 0;
+ inst->cvpwnccbufs_num--;
+ kfree(inst->cvpwnccbufs_table);
+ inst->cvpwnccbufs_table = NULL;
+static void _wncc_print_oob(struct eva_kmd_oob_wncc* wncc_oob)
+ u32 i, j;
+ if (!wncc_oob) {
+ dprintk(CVP_DBG, "%s: wncc OOB --", __func__);
+ dprintk(CVP_DBG, "%s: num_layers: %u", __func__, wncc_oob->num_layers);
+ for (i = 0; i < wncc_oob->num_layers; i++) {
+ dprintk(CVP_DBG, "%s: layers[%u].num_addrs: %u",
+ __func__, i, wncc_oob->layers[i].num_addrs);
+ for (j = 0; j < wncc_oob->layers[i].num_addrs; j++) {
+ "%s: layers[%u].addrs[%u]: %04u 0x%08x",
+ __func__, i, j,
+ wncc_oob->layers[i].addrs[j].buffer_id,
+ wncc_oob->layers[i].addrs[j].offset);
+static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst* inst)
+ u32 i, entries = 0;
+ dprintk(CVP_DBG, "%s: wncc buffer look-up table is empty",
+ dprintk(CVP_DBG, "%s: wncc buffer table:", __func__);
+ for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS &&
+ entries < inst->cvpwnccbufs_num; i++) {
+ if (inst->cvpwnccbufs_table[i].iova != 0) {
+ "%s: buf_idx=%04d --> "
+ "fd=%03d, iova=0x%08x, size=%d",
+ __func__, i,
+ inst->cvpwnccbufs_table[i].fd,
+ inst->cvpwnccbufs_table[i].iova,
+ inst->cvpwnccbufs_table[i].size);
+ entries++;
+static void _wncc_print_metadata_buf(u32 num_layers, u32 num_addrs,
+ struct eva_kmd_wncc_metadata** wncc_metadata)
+ u32 i, j, iova;
+ if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS ||
+ !wncc_metadata) {
+ dprintk(CVP_DBG, "%s: wncc metadata buffers --", __func__);
+ dprintk(CVP_DBG, "%s: num_layers: %u", __func__, num_layers);
+ dprintk(CVP_DBG, "%s: num_addrs: %u", __func__, num_addrs);
+ for (i = 0; i < num_layers; i++) {
+ for (j = 0; j < num_addrs; j++) {
+ iova = (wncc_metadata[i][j].iova_msb << 22) |
+ wncc_metadata[i][j].iova_lsb;
+ "%s: wncc_metadata[%u][%u]: "
+ "%4u %3u %4u %3u 0x%08x %1u %4d %4d %4d %4d",
+ wncc_metadata[i][j].loc_x_dec,
+ wncc_metadata[i][j].loc_x_frac,
+ wncc_metadata[i][j].loc_y_dec,
+ wncc_metadata[i][j].loc_y_frac,
+ iova,
+ wncc_metadata[i][j].scale_idx,
+ wncc_metadata[i][j].aff_coeff_3,
+ wncc_metadata[i][j].aff_coeff_2,
+ wncc_metadata[i][j].aff_coeff_1,
+ wncc_metadata[i][j].aff_coeff_0);
+static int _wncc_copy_oob_from_user(struct eva_kmd_hfi_packet* in_pkt,
+ struct eva_kmd_oob_wncc* wncc_oob)
+ u32 oob_type = 0;
+ struct eva_kmd_oob_buf* oob_buf_u;
+ struct eva_kmd_oob_wncc* wncc_oob_u;
+ struct eva_kmd_oob_wncc* wncc_oob_k;
+ unsigned int i;
+ u32 num_addrs;
+ if (!in_pkt || !wncc_oob) {
+ oob_buf_u = in_pkt->oob_buf;
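+ /* Validate each user-space pointer with access_ok() before get_user()/copy_from_user(). */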
+ if (!access_ok(oob_buf_u, sizeof(*oob_buf_u))) {
+ dprintk(CVP_ERR, "%s: invalid OOB buf pointer", __func__);
+ if (!access_ok(&oob_buf_u->oob_type, sizeof(oob_buf_u->oob_type))) {
+ "%s: bad OOB buf pointer, oob_type inaccessible",
+ rc = get_user(oob_type, &oob_buf_u->oob_type);
+ if (oob_type != EVA_KMD_OOB_WNCC) {
+ dprintk(CVP_ERR, "%s: incorrect OOB type (%d) for wncc",
+ __func__, oob_type);
+ wncc_oob_u = &oob_buf_u->wncc;
+ wncc_oob_k = wncc_oob;
+ if (!access_ok(&wncc_oob_u->metadata_bufs_offset,
+ sizeof(wncc_oob_u->metadata_bufs_offset))) {
+ "%s: bad OOB buf pointer, wncc.metadata_bufs_offset inaccessible",
+ rc = get_user(wncc_oob_k->metadata_bufs_offset,
+ &wncc_oob_u->metadata_bufs_offset);
+ if (wncc_oob_k->metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
+ - sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
+ dprintk(CVP_ERR, "%s: invalid wncc metadata bufs offset",
+ if (!access_ok(&wncc_oob_u->num_layers,
+ sizeof(wncc_oob_u->num_layers))) {
+ "%s: bad OOB buf pointer, wncc.num_layers inaccessible",
+ rc = get_user(wncc_oob_k->num_layers, &wncc_oob_u->num_layers);
+ if (wncc_oob_k->num_layers < 1 ||
+ wncc_oob_k->num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
+ dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
+ for (i = 0; i < wncc_oob_k->num_layers; i++) {
+ if (!access_ok(&wncc_oob_u->layers[i].num_addrs,
+ sizeof(wncc_oob_u->layers[i].num_addrs))) {
+ "%s: bad OOB buf pointer, wncc.layers[%u].num_addrs inaccessible",
+ rc = get_user(wncc_oob_k->layers[i].num_addrs,
+ &wncc_oob_u->layers[i].num_addrs);
+ num_addrs = wncc_oob_k->layers[i].num_addrs;
+ if (num_addrs < 1 || num_addrs > EVA_KMD_WNCC_MAX_ADDRESSES) {
+ "%s: invalid wncc num addrs for layer %u",
+ if (!access_ok(wncc_oob_u->layers[i].addrs,
+ num_addrs * sizeof(struct eva_kmd_wncc_addr)) ||
+ !access_ok(&wncc_oob_u->layers[i].addrs[num_addrs - 1],
+ sizeof(struct eva_kmd_wncc_addr))) {
+ "%s: bad OOB buf pointer, wncc.layers[%u].addrs inaccessible",
+ rc = copy_from_user(wncc_oob_k->layers[i].addrs,
+ wncc_oob_u->layers[i].addrs,
+ num_addrs * sizeof(struct eva_kmd_wncc_addr));
+ if (false)
+ _wncc_print_oob(wncc_oob);
+static int _wncc_map_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
+ struct cvp_buf_type* wncc_metadata_bufs;
+ struct dma_buf* dmabuf;
+ struct eva_buf_map map;
+ __u32 num_layers, metadata_bufs_offset;
+ _buf_map_set_vaddr(&map, (void *)0xdeadbeaf);
+ if (!in_pkt || !wncc_metadata || !wncc_oob) {
+ num_layers = wncc_oob->num_layers;
+ metadata_bufs_offset = wncc_oob->metadata_bufs_offset;
+ if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
+ if (metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
+ - num_layers * sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
+ wncc_metadata_bufs = (struct cvp_buf_type*)
+ &in_pkt->pkt_data[metadata_bufs_offset];
+ dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
+ if (IS_ERR(dmabuf)) {
+ rc = PTR_ERR(dmabuf);
+ "%s: dma_buf_get() failed for "
+ "wncc_metadata_bufs[%d], rc %d",
+ __func__, i, rc);
+ if (dmabuf->size < wncc_oob->layers[i].num_addrs *
+ sizeof(struct eva_kmd_wncc_metadata)) {
+ "%s: wncc_metadata_bufs[%d] size insufficient for num addrs in oob",
+ dma_buf_put(dmabuf);
+ rc = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+ "%s: dma_buf_begin_cpu_access() failed "
+ "for wncc_metadata_bufs[%d], rc %d",
+ rc = dma_buf_vmap(dmabuf, &map);
+ "%s: dma_buf_vmap() failed for "
+ "wncc_metadata_bufs[%d]",
+ dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+ "%s: wncc_metadata_bufs[%d] map.is_iomem is %d",
+ __func__, i, map.is_iomem);
+ wncc_metadata[i] = (struct eva_kmd_wncc_metadata*)map.vaddr;
+ _wncc_unmap_metadata_bufs(in_pkt, wncc_oob, wncc_metadata);
+ if (!wncc_metadata[i]) {
+ rc = -PTR_ERR(dmabuf);
+ _buf_map_set_vaddr(&map, wncc_metadata[i]);
+ dma_buf_vunmap(dmabuf, &map);
+ wncc_metadata[i] = NULL;
+ rc = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+ "%s: dma_buf_end_cpu_access() failed "
+static int init_wncc_bufs(void)
+ for (i = 0; i < NUM_WNCC_BUFS; i++) {
+ wncc_buf_pool.bufs[i] = (struct eva_kmd_oob_wncc*)kzalloc(
+ sizeof(struct eva_kmd_oob_wncc), GFP_KERNEL);
+ if (!wncc_buf_pool.bufs[i]) {
+ i--;
+ goto exit_fail;
+ wncc_buf_pool.used_bitmap = 0;
+ wncc_buf_pool.allocated = true;
+exit_fail:
+ while (i >= 0) {
+ kfree(wncc_buf_pool.bufs[i]);
+static int alloc_wncc_buf(struct wncc_oob_buf *wob)
+ int rc, i;
+ mutex_lock(&wncc_buf_pool.lock);
+ if (!wncc_buf_pool.allocated) {
+ rc = init_wncc_bufs();
+ mutex_unlock(&wncc_buf_pool.lock);
+ if (!(wncc_buf_pool.used_bitmap & BIT(i))) {
+ wncc_buf_pool.used_bitmap |= BIT(i);
+ wob->bitmap_idx = i;
+ wob->buf = wncc_buf_pool.bufs[i];
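+ /* No free pool slot: fall back to a one-off allocation; bitmap_idx 0xff tells free_wncc_buf() to kfree it. */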
+ wob->bitmap_idx = 0xff;
+ wob->buf = (struct eva_kmd_oob_wncc*)kzalloc(
+ if (!wob->buf)
+static void free_wncc_buf(struct wncc_oob_buf *wob)
+ if (!wob)
+ if (wob->bitmap_idx == 0xff) {
+ kfree(wob->buf);
+ if (wob->bitmap_idx < NUM_WNCC_BUFS) {
+ wncc_buf_pool.used_bitmap &= ~BIT(wob->bitmap_idx);
+ memset(wob->buf, 0, sizeof(struct eva_kmd_oob_wncc));
+ wob->buf = NULL;
+static int msm_cvp_proc_oob_wncc(struct msm_cvp_inst* inst,
+ struct eva_kmd_hfi_packet* in_pkt)
+ struct eva_kmd_oob_wncc* wncc_oob;
+ struct wncc_oob_buf wob;
+ struct eva_kmd_wncc_metadata* wncc_metadata[EVA_KMD_WNCC_MAX_LAYERS];
+ unsigned int i, j;
+ bool empty = false;
+ u32 buf_id, buf_idx, buf_offset, iova;
+ rc = alloc_wncc_buf(&wob);
+ wncc_oob = wob.buf;
+ rc = _wncc_copy_oob_from_user(in_pkt, wncc_oob);
+ dprintk(CVP_ERR, "%s: OOB buf copying failed", __func__);
+ memset(wncc_metadata, 0,
+ sizeof(*wncc_metadata) * EVA_KMD_WNCC_MAX_LAYERS);
+ rc = _wncc_map_metadata_bufs(in_pkt, wncc_oob, wncc_metadata);
+ dprintk(CVP_ERR, "%s: failed to map wncc metadata bufs",
+ if (inst->cvpwnccbufs_num == 0 || inst->cvpwnccbufs_table == NULL) {
+ dprintk(CVP_ERR, "%s: no wncc bufs currently mapped", __func__);
+ empty = true;
+ for (i = 0; !empty && i < wncc_oob->num_layers; i++) {
+ buf_id = wncc_oob->layers[i].addrs[j].buffer_id;
+ if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET ||
+ buf_id >= (EVA_KMD_WNCC_SRC_BUF_ID_OFFSET +
+ EVA_KMD_WNCC_MAX_SRC_BUFS)) {
+ "%s: invalid wncc buf id %u "
+ "in layer #%u address #%u",
+ __func__, buf_id, i, j);
+ "%s: unmapped wncc buf id %u "
+ buf_offset = wncc_oob->layers[i].addrs[j].offset;
+ if (buf_offset >=
+ inst->cvpwnccbufs_table[buf_idx].size) {
+ /* NOTE: This buffer offset validation is
+ * not comprehensive since wncc src image
+ * resolution information is not known to
+ * KMD. UMD is responsible for comprehensive
+ * validation.
+ "%s: invalid wncc buf offset %u "
+ __func__, buf_offset, i, j);
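+ /* Split the computed IOVA across the lsb/msb metadata bitfields; iova_msb holds the bits above bit 21. */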
+ iova = inst->cvpwnccbufs_table[buf_idx].iova +
+ buf_offset;
+ wncc_metadata[i][j].iova_lsb = iova;
+ wncc_metadata[i][j].iova_msb = iova >> 22;
+ _wncc_print_metadata_buf(wncc_oob->num_layers,
+ wncc_oob->layers[0].num_addrs, wncc_metadata);
+ if (_wncc_unmap_metadata_bufs(in_pkt, wncc_oob, wncc_metadata)) {
+ dprintk(CVP_ERR, "%s: failed to unmap wncc metadata bufs",
+ free_wncc_buf(&wob);
+int msm_cvp_proc_oob(struct msm_cvp_inst* inst,
+ struct cvp_hfi_cmd_session_hdr* cmd_hdr =
+ (struct cvp_hfi_cmd_session_hdr*)in_pkt;
+ switch (cmd_hdr->packet_type) {
+ case HFI_CMD_SESSION_CVP_WARP_NCC_FRAME:
+ rc = msm_cvp_proc_oob_wncc(inst, in_pkt);
+void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type,
+ u32 offset, u32 size)
+ enum smem_cache_ops cache_op;
+ if (msm_cvp_cacheop_disabled)
+ case EVA_KMD_BUFTYPE_INPUT:
+ cache_op = SMEM_CACHE_CLEAN;
+ case EVA_KMD_BUFTYPE_OUTPUT:
+ cache_op = SMEM_CACHE_INVALIDATE;
+ cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
+ "%s: cache operation enabled for dma_buf: %llx, cache_op: %d, offset: %d, size: %d\n",
+ __func__, smem->dma_buf, cache_op, offset, size);
+ msm_cvp_smem_cache_operations(smem->dma_buf, cache_op, offset, size);
+static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
+ struct dma_buf *dma_buf,
+ u32 pkt_type)
+ struct msm_cvp_smem *smem;
+ struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef;
+ struct cvp_internal_buf *buf = (struct cvp_internal_buf *)0xdeadbeef;
+ if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
+ mutex_lock(&inst->dma_cache.lock);
+ for (i = 0; i < inst->dma_cache.nr; i++)
+ if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
+ SET_USE_BITMAP(i, inst);
+ smem = inst->dma_cache.entries[i];
+ smem->bitmap_index = i;
+ smem->pkt_type = pkt_type;
+ * If we find it, it means we already increased
+ * refcount before, so we put it to avoid a double
+ * increment.
+ mutex_unlock(&inst->dma_cache.lock);
+ print_smem(CVP_MEM, "found in cache", inst, smem);
+ return smem;
+ /* Search persist list */
+ mutex_lock(&inst->persistbufs.lock);
+ list_for_each_entry(buf, &inst->persistbufs.list, list) {
+ smem = buf->smem;
+ if (smem && smem->dma_buf == dma_buf) {
+ mutex_unlock(&inst->persistbufs.lock);
+ print_smem(CVP_MEM, "found in persist", inst, smem);
+ /* Search frame list */
+ mutex_lock(&inst->frames.lock);
+ list_for_each_entry(frame, &inst->frames.list, list) {
+ for (i = 0; i < frame->nr; i++) {
+ smem = frame->bufs[i].smem;
+ mutex_unlock(&inst->frames.lock);
+ print_smem(CVP_MEM, "found in frame",
+static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
+ struct msm_cvp_smem *smem2;
+ if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
+ inst->dma_cache.entries[inst->dma_cache.nr] = smem;
+ SET_USE_BITMAP(inst->dma_cache.nr, inst);
+ smem->bitmap_index = inst->dma_cache.nr;
+ inst->dma_cache.nr++;
+ i = smem->bitmap_index;
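+ /* Cache is full: evict the first entry whose use bit is clear and reuse its slot. */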
+ i = find_first_zero_bit(&inst->dma_cache.usage_bitmap,
+ MAX_DMABUF_NUMS);
+ if (i < MAX_DMABUF_NUMS) {
+ smem2 = inst->dma_cache.entries[i];
+ msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
+ msm_cvp_smem_put_dma_buf(smem2->dma_buf);
+ cvp_kmem_cache_free(&cvp_driver->smem_cache, smem2);
+ inst->dma_cache.entries[i] = smem;
+ "%s: reached limit, fallback to buf mapping list\n"
+ , __func__);
+ dprintk(CVP_MEM, "Add entry %d into cache\n", i);
+static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
+ struct cvp_buf_type *buf,
+ bool is_persist,
+ int rc = 0, found = 1;
+ if (is_persist) {
+ if (!smem)
+ smem->flags |= SMEM_PERSIST;
+ rc = msm_cvp_map_smem(inst, smem, "map cpu");
+ if (!IS_CVP_BUF_VALID(buf, smem)) {
+ "%s: invalid offset %d or size %d persist\n",
+ __func__, buf->offset, buf->size);
+ goto exit2;
+ smem = msm_cvp_session_find_smem(inst, dma_buf, pkt_type);
+ found = 0;
+ if (is_params_pkt(pkt_type))
+ "%s: invalid buf %d %d fd %d dma 0x%llx %s %d type %#x\n",
+ __func__, buf->offset, buf->size, buf->fd,
+ dma_buf, dma_buf->name, dma_buf->size, pkt_type);
+ rc = msm_cvp_session_add_smem(inst, smem);
+ if (rc && rc != -ENOMEM)
+ dprintk(CVP_ERR, "%s: invalid offset %d or size %d found\n",
+ atomic_dec(&smem->refcount);
+exit2:
+ msm_cvp_unmap_smem(inst, smem, "unmap cpu");
+static int msm_cvp_unmap_user_persist_buf(struct msm_cvp_inst *inst,
+ u32 pkt_type, u32 buf_idx, u32 *iova)
+ struct list_head *ptr;
+ struct list_head *next;
+ struct cvp_internal_buf *pbuf;
+ if (!dma_buf)
+ list_for_each_safe(ptr, next, &inst->persistbufs.list) {
+ if (!ptr) {
+ pbuf = list_entry(ptr, struct cvp_internal_buf, list);
+ if (dma_buf == pbuf->smem->dma_buf && (pbuf->smem->flags & SMEM_PERSIST)) {
+ *iova = pbuf->smem->device_addr;
+ "Unmap persist fd %d, dma_buf %#llx iova %#x\n",
+ pbuf->fd, pbuf->smem->dma_buf, *iova);
+ list_del(&pbuf->list);
+ if (*iova) {
+ msm_cvp_unmap_smem(inst, pbuf->smem, "unmap user persist");
+ msm_cvp_smem_put_dma_buf(pbuf->smem->dma_buf);
+ pbuf->smem->device_addr = 0;
+ pbuf->smem = NULL;
+ cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
+ dma_buf_put(dma_buf);
+static int msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
+ int ret;
+ if (!inst->persistbufs.list.next) {
+ if (!ptr)
+ if (dma_buf == pbuf->smem->dma_buf) {
+ pbuf->size =
+ (pbuf->size >= buf->size) ?
+ pbuf->size : buf->size;
+ *iova = pbuf->smem->device_addr + buf->offset;
+ atomic_inc(&pbuf->smem->refcount);
+ "map persist Reuse fd %d, dma_buf %#llx\n",
+ pbuf->fd, pbuf->smem->dma_buf);
+ pbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+ if (!pbuf) {
+ dprintk(CVP_ERR, "%s failed to allocate kmem obj\n",
+ smem = msm_cvp_session_get_smem(inst, buf, false, pkt_type);
+ smem = msm_cvp_session_get_smem(inst, buf, true, pkt_type);
+ ret = -ENOMEM;
+ smem->buf_idx = buf_idx;
+ pbuf->smem = smem;
+ pbuf->fd = buf->fd;
+ pbuf->size = buf->size;
+ pbuf->offset = buf->offset;
+ pbuf->ownership = CLIENT;
+ list_add_tail(&pbuf->list, &inst->persistbufs.list);
+ print_internal_buffer(CVP_MEM, "map persist", inst, pbuf);
+ *iova = smem->device_addr + buf->offset;
+ return ret;
+static u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
+ struct msm_cvp_frame *frame,
+ u32 pkt_type, u32 buf_idx)
+ u32 iova = 0;
+ u32 nr;
+ u32 type;
+ if (!inst || !frame) {
+ nr = frame->nr;
+ if (nr == MAX_FRAME_BUFFER_NUMS) {
+ dprintk(CVP_ERR, "%s: max frame buffer reached\n", __func__);
+ frame->bufs[nr].fd = buf->fd;
+ frame->bufs[nr].smem = smem;
+ frame->bufs[nr].size = buf->size;
+ frame->bufs[nr].offset = buf->offset;
+ print_internal_buffer(CVP_MEM, "map cpu", inst, &frame->bufs[nr]);
+ frame->nr++;
+ type = EVA_KMD_BUFTYPE_INPUT | EVA_KMD_BUFTYPE_OUTPUT;
+ msm_cvp_cache_operations(smem, type, buf->offset, buf->size);
+ iova = smem->device_addr + buf->offset;
+ return iova;
+static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
+ struct msm_cvp_frame *frame)
+ struct cvp_internal_buf *buf;
+ type = EVA_KMD_BUFTYPE_OUTPUT;
+ for (i = 0; i < frame->nr; ++i) {
+ buf = &frame->bufs[i];
+ if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
+ /* smem not in dmamap cache */
+ if (atomic_dec_and_test(&smem->refcount)) {
+ dma_heap_buffer_free(smem->dma_buf);
+ smem->buf_idx |= 0xdead0000;
+ buf->smem = NULL;
+ CLEAR_USE_BITMAP(smem->bitmap_index, inst);
+ print_smem(CVP_MEM, "Map dereference",
+ smem->buf_idx |= 0x10000000;
+ cvp_kmem_cache_free(&cvp_driver->frame_cache, frame);
+static void backup_frame_buffers(struct msm_cvp_inst *inst,
+ /* Save frame buffers before unmap them */
+ int i = frame->nr;
+ if (i == 0 || i > MAX_FRAME_BUFFER_NUMS)
+ inst->last_frame.ktid = frame->ktid;
+ inst->last_frame.nr = frame->nr;
+ if (frame->bufs[i].smem->bitmap_index < MAX_DMABUF_NUMS) {
+ * Frame buffer info can be found in the dma_cache table;
+ * skip saving.
+ inst->last_frame.nr = 0;
+ inst->last_frame.smem[i] = *(frame->bufs[i].smem);
+ } while (i);
+void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
+ struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef, *dummy1;
+ ktid &= (FENCE_BIT - 1);
+ dprintk(CVP_MEM, "%s: (%#x) unmap frame %llu\n",
+ __func__, hash32_ptr(inst->session), ktid);
+ list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
+ if (frame->ktid == ktid) {
+ list_del(&frame->list);
+ "pkt_type %08x sess_id %08x trans_id <> ktid %llu\n",
+ __func__, frame->pkt_type,
+ hash32_ptr(inst->session),
+ frame->ktid);
+ /* Save the previous frame mappings for debug */
+ backup_frame_buffers(inst, frame);
+ msm_cvp_unmap_frame_buf(inst, frame);
+ if (!found)
+ dprintk(CVP_WARN, "%s frame %llu not found!\n", __func__, ktid);
+ * Unmap persistent buffer before sending RELEASE_PERSIST_BUFFERS to FW
+ * This packet is sent after SESSION_STOP. The assumption is FW/HW will
+ * NOT access any of the 3 persist buffers.
+int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
+ unsigned int offset, unsigned int buf_num)
+ int i, ret;
+ if (!offset || !buf_num)
+ for (i = 0; i < buf_num; i++) {
+ buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
+ offset += sizeof(*buf) >> 2;
+ if (buf->fd < 0 || !buf->size)
+ ret = msm_cvp_unmap_user_persist_buf(inst, buf,
+ cmd_hdr->packet_type, i, &iova);
+ if (ret) {
+ "%s: buf %d unmap failed.\n",
+ buf->fd = iova;
+int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+ ret = msm_cvp_map_user_persist_buf(inst, buf,
+ "%s: buf %d map failed.\n",
+int msm_cvp_map_frame(struct msm_cvp_inst *inst,
+ struct msm_cvp_frame *frame;
+ struct msm_cvp_inst *instance = (struct msm_cvp_inst *)0xdeadbeef;
+ ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
+ cmd_hdr->client_data.kdata = ktid;
+ frame = cvp_kmem_cache_zalloc(&cvp_driver->frame_cache, GFP_KERNEL);
+ if (!frame)
+ frame->ktid = ktid;
+ frame->nr = 0;
+ frame->pkt_type = cmd_hdr->packet_type;
+ if (buf->fd < 0 || !buf->size) {
+ buf->fd = 0;
+ buf->size = 0;
+ iova = msm_cvp_map_frame_buf(inst, buf, frame, cmd_hdr->packet_type, i);
+ if (!iova) {
+ "%s: buf %d register failed.\n",
+ dprintk(CVP_ERR, "smem_leak_count %d\n", core->smem_leak_count);
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list) {
+ msm_cvp_print_inst_bufs(instance, false);
+ list_add_tail(&frame->list, &inst->frames.list);
+ dprintk(CVP_MEM, "%s: map frame %llu\n", __func__, ktid);
+int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
+ struct cvp_internal_buf *cbuf, *dummy;
+ struct eva_kmd_buffer buf;
+ struct list_head *ptr = (struct list_head *)0xdead;
+ struct list_head *next = (struct list_head *)0xdead;
+ cbuf = list_entry(ptr, struct cvp_internal_buf, list);
+ smem = cbuf->smem;
+ dprintk(CVP_ERR, "%s invalid persist smem\n", __func__);
+ if (cbuf->ownership != DRIVER) {
+ "%s: %x : fd %d %pK size %d",
+ "free user persistent", hash32_ptr(inst->session), cbuf->fd,
+ smem->dma_buf, cbuf->size);
+ * Ignore the refcount; the mapping must be removed
+ * since this is a user persistent buffer.
+ if (smem->device_addr) {
+ msm_cvp_unmap_smem(inst, smem,
+ "unmap persist");
+ msm_cvp_smem_put_dma_buf(
+ cbuf->smem->dma_buf);
+ smem->device_addr = 0;
+ cbuf->smem = NULL;
+ * DMM_PARAMS and WAP_NCC_PARAMS cases
+ * Leave dma_cache cleanup to unmap
+ for (i = 0; i < inst->dma_cache.nr; i++) {
+ if (atomic_read(&smem->refcount) == 0) {
+ print_smem(CVP_MEM, "free", inst, smem);
+ } else if (!(smem->flags & SMEM_PERSIST)) {
+ print_smem(CVP_WARN, "in use", inst, smem);
+ inst->dma_cache.entries[i] = NULL;
+ cbuf = (struct cvp_internal_buf *)0xdeadbeef;
+ list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list, list) {
+ print_internal_buffer(CVP_MEM, "remove dspbufs", inst, cbuf);
+ if (cbuf->ownership == CLIENT) {
+ } else if (cbuf->ownership == DSP) {
+ rc = cvp_dsp_fastrpc_unmap(inst->dsp_handle, cbuf);
+ "%s: failed to unmap buf from DSP\n",
+ rc = cvp_release_dsp_buffers(inst, cbuf);
+ "%s Fail to free buffer 0x%x\n",
+ if (inst->cvpwnccbufs_num != 0)
+ dprintk(CVP_WARN, "%s: cvpwnccbufs not empty, contains %d bufs",
+ __func__, inst->cvpwnccbufs_num);
+ list_for_each_entry_safe(cbuf, dummy, &inst->cvpwnccbufs.list, list) {
+ print_internal_buffer(CVP_MEM, "remove wnccbufs", inst, cbuf);
+ buf.fd = cbuf->fd;
+ buf.reserved[0] = cbuf->ktid;
+ msm_cvp_unmap_buf_wncc(inst, &buf);
+void msm_cvp_populate_dsp_buf_info(struct cvp_internal_buf *buf,
+ u32 session_id,
+ struct cvp_hfi_ops *dev_ops = (struct cvp_hfi_ops *) core->dev_ops;
+ struct iris_hfi_device *cvp_device = (struct iris_hfi_device *) dev_ops->hfi_device_data;
+ struct cvp_iface_q_info dsp_debugQ_info = cvp_device->dsp_iface_queues[DEBUG_Q];
+ struct cvp_dsp_trace_buf *trace_buf;
+ struct cvp_dsp_trace *dsp_debug_trace;
+ dsp_debug_trace = (struct cvp_dsp_trace *) dsp_debugQ_info.q_array.align_virtual_addr;
+ if (!dsp_debug_trace) {
+ dprintk(CVP_ERR, "dsp trace is NULL\n");
+ for (int session_idx = 0; session_idx < EVA_TRACE_MAX_SESSION_NUM; session_idx++) {
+ if (dsp_debug_trace->sessions[session_idx].session_id == session_id) {
+ u32 buf_cnt = dsp_debug_trace->sessions[session_idx].buf_cnt;
+ for (int buf_idx = 0; buf_idx < buf_cnt; buf_idx++) {
+ trace_buf = &dsp_debug_trace->sessions[session_idx].buf[buf_idx];
+ if (buf->smem->device_addr == trace_buf->iova) {
+ buf->smem->buf_idx = trace_buf->buf_idx;
+ buf->smem->pkt_type = trace_buf->pkt_type;
+ buf->smem->fd = trace_buf->fd;
+#define MAX_NUM_FRAMES_DUMP 4
+void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log)
+ struct inst_snapshot *snap = NULL;
+ int i = 0, c = 0;
+ // DSP trace related variables
+ session_id = hash32_ptr(session);
+ if (log && core->log.snapshot_index < 16) {
+ snap = &core->log.snapshot[core->log.snapshot_index];
+ snap->session = inst->session;
+ core->log.snapshot_index++;
+ dprintk(CVP_ERR, "%s - invalid param %pK\n",
+ __func__, inst);
+ "---Buffer details for inst: %pK %s of type: %d---\n",
+ inst, inst->proc_name, inst->session_type);
+ dprintk(CVP_ERR, "dma_cache entries %d\n", inst->dma_cache.nr);
+ if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
+ _log_smem(snap, inst, inst->dma_cache.entries[i], log);
+ dprintk(CVP_ERR, "frame buffer list\n");
+ i++;
+ if (i <= MAX_NUM_FRAMES_DUMP) {
+ dprintk(CVP_ERR, "frame no %d tid %llx bufs\n",
+ i, frame->ktid);
+ for (c = 0; c < frame->nr; c++)
+ _log_smem(snap, inst, frame->bufs[c].smem,
+ log);
+ if (i > MAX_NUM_FRAMES_DUMP)
+ dprintk(CVP_ERR, "Skipped %d frames' buffers\n",
+ (i - MAX_NUM_FRAMES_DUMP));
+ dprintk(CVP_ERR, "dsp buffer list:\n");
+ list_for_each_entry(buf, &inst->cvpdspbufs.list, list) {
+ // Populate DSP buffer info from debug queue to kernel instance
+ msm_cvp_populate_dsp_buf_info(buf, session, session_id, core);
+ // Log print buffer info
+ _log_buf(snap, SMEM_CDSP, inst, buf, log);
+ dprintk(CVP_ERR, "wncc buffer list:\n");
+ list_for_each_entry(buf, &inst->cvpwnccbufs.list, list)
+ print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
+ dprintk(CVP_ERR, "persist buffer list:\n");
+ list_for_each_entry(buf, &inst->persistbufs.list, list)
+ _log_buf(snap, SMEM_PERSIST, inst, buf, log);
+ dprintk(CVP_ERR, "last frame ktid %llx\n", inst->last_frame.ktid);
+ for (i = 0; i < inst->last_frame.nr; i++)
+ _log_smem(snap, inst, &inst->last_frame.smem[i], log);
+ dprintk(CVP_ERR, "unmapped wncc bufs\n");
+ for (i = 0; i < inst->unused_wncc_bufs.nr; i++)
+ _log_smem(snap, inst, &inst->unused_wncc_bufs.smem[i], log);
+ dprintk(CVP_ERR, "unmapped dsp bufs\n");
+ for (i = 0; i < inst->unused_dsp_bufs.nr; i++)
+ _log_smem(snap, inst, &inst->unused_dsp_bufs.smem[i], log);
+struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
+ u32 buffer_size)
+ struct msm_cvp_list *buf_list;
+ u32 smem_flags = SMEM_UNCACHED;
+ dprintk(CVP_ERR, "%s Invalid input\n", __func__);
+ buf_list = &inst->persistbufs;
+ if (!buffer_size)
+ /* If PERSIST buffer requires secure mapping, uncomment
+ * below flags setting
+ * smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
+ buf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
+ if (!buf) {
+ dprintk(CVP_ERR, "%s Out of memory\n", __func__);
+ goto fail_kzalloc;
+ buf->smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
+ if (!buf->smem) {
+ goto err_no_smem;
+ buf->smem->flags = smem_flags;
+ rc = msm_cvp_smem_alloc(buffer_size, 1, 0, /* 0: no mapping in kernel space */
+ &(inst->core->resources), buf->smem);
+ dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
+ buf->smem->pkt_type = buf->smem->buf_idx = 0;
+ atomic_inc(&buf->smem->refcount);
+ buf->size = buf->smem->size;
+ buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
+ buf->ownership = DRIVER;
+ mutex_lock(&buf_list->lock);
+ list_add_tail(&buf->list, &buf_list->list);
+ mutex_unlock(&buf_list->lock);
+ return buf;
+ cvp_kmem_cache_free(&cvp_driver->smem_cache, buf->smem);
+err_no_smem:
+ cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
+fail_kzalloc:
+int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
+ dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
+ dprintk(CVP_ERR, "Invalid core pointer = %pK\n", core);
+ dprintk(CVP_ERR, "Invalid device pointer = %pK\n", ops_tbl);
+ dprintk(CVP_MEM, "release persist buffer!\n");
+ /* Workaround for FW: release buffer means release all */
+ if (inst->state > MSM_CVP_CORE_INIT_DONE && inst->state <= MSM_CVP_CLOSE_DONE) {
+ rc = call_hfi_op(ops_tbl, session_release_buffers,
+ (void *)inst->session);
+ rc = wait_for_sess_signal_receipt(inst,
+ HAL_SESSION_RELEASE_BUFFER_DONE);
+ "%s: wait release_arp signal failed, rc %d\n",
+ dprintk_rl(CVP_WARN, "Fail to send Rel prst buf\n");
+ buf = list_entry(ptr, struct cvp_internal_buf, list);
+ dprintk(CVP_ERR, "%s invalid smem\n", __func__);
+ if (buf->ownership == DRIVER) {
+ "free arp", hash32_ptr(inst->session), buf->fd,
+ smem->dma_buf, buf->size);
+ list_del(&buf->list);
+ msm_cvp_smem_free(smem);
+int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
+ struct cvp_internal_buf *buf,
+ u32 buffer_size,
+ u32 secure_type)
+ if (!buf)
+ switch (secure_type) {
+ case 1:
+ smem_flags |= SMEM_SECURE | SMEM_PIXEL;
+ case 2:
+ smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
+ dprintk(CVP_ERR, "%s Invalid secure_type %d\n",
+ __func__, secure_type);
+ dprintk(CVP_MEM, "%s smem_flags 0x%x\n", __func__, smem_flags);
+ goto fail_kzalloc_smem_cache;
+ rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
+ dprintk(CVP_ERR, "Failed to allocate DSP buf\n");
+ dprintk(CVP_MEM, "%s dma_buf %pK\n", __func__, buf->smem->dma_buf);
+ buf->ownership = DSP;
+fail_kzalloc_smem_cache:
+int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
+ struct cvp_internal_buf *buf)
+ dprintk(CVP_ERR, "Invalid buffer pointer = %pK\n", inst);
+ if (buf->ownership == DSP) {
+ "%s: %x : fd %x %s size %d",
+ __func__, hash32_ptr(inst->session), buf->fd,
+ smem->dma_buf->name, buf->size);
+ "%s: wrong owner %d %x : fd %x %s size %d",
+ __func__, buf->ownership, hash32_ptr(inst->session),
+ buf->fd, smem->dma_buf->name, buf->size);
+int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
+ print_client_buffer(CVP_HFI, "register", inst, buf);
+ if (buf->index)
+ rc = msm_cvp_map_buf_dsp(inst, buf);
+ rc = msm_cvp_map_buf_wncc(inst, buf);
+ dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
+ buf->fd, buf->reserved[0]);
+int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
+ print_client_buffer(CVP_HFI, "unregister", inst, buf);
+ rc = msm_cvp_unmap_buf_dsp(inst, buf);
+ rc = msm_cvp_unmap_buf_wncc(inst, buf);
@@ -0,0 +1,244 @@
+#ifndef _MSM_CVP_BUF_H_
+#define _MSM_CVP_BUF_H_
+#include <linux/poll.h>
+#include <linux/refcount.h>
+#define MAX_FRAME_BUFFER_NUMS 40
+#define MAX_DMABUF_NUMS 64
+#define IS_CVP_BUF_VALID(buf, smem) \
+ ((buf->size <= smem->size) && \
+ (buf->size <= smem->size - buf->offset))
+struct msm_cvp_inst;
+struct msm_cvp_platform_resources;
+struct msm_cvp_list;
+enum smem_cache_ops {
+ SMEM_CACHE_CLEAN,
+ SMEM_CACHE_INVALIDATE,
+ SMEM_CACHE_CLEAN_INVALIDATE,
+enum smem_prop {
+ SMEM_UNCACHED = 0x1,
+ SMEM_CACHED = 0x2,
+ SMEM_SECURE = 0x4,
+ SMEM_CDSP = 0x8,
+ SMEM_NON_PIXEL = 0x10,
+ SMEM_PIXEL = 0x20,
+ SMEM_CAMERA = 0x40,
+ SMEM_PERSIST = 0x100,
+struct msm_cvp_list {
+static inline void INIT_MSM_CVP_LIST(struct msm_cvp_list *mlist)
+ mutex_init(&mlist->lock);
+ INIT_LIST_HEAD(&mlist->list);
+static inline void DEINIT_MSM_CVP_LIST(struct msm_cvp_list *mlist)
+ mutex_destroy(&mlist->lock);
+struct cvp_dma_mapping_info {
+ struct device *dev;
+ struct iommu_domain *domain;
+ struct sg_table *table;
+ struct dma_buf *buf;
+ void *cb_info;
+struct msm_cvp_smem {
+ atomic_t refcount;
+ u32 bitmap_index;
+ u32 pkt_type;
+ u32 buf_idx;
+ u32 fd;
+ struct cvp_dma_mapping_info mapping_info;
+struct msm_cvp_wncc_buffer {
+struct cvp_dmamap_cache {
+ unsigned long usage_bitmap;
+ struct msm_cvp_smem *entries[MAX_DMABUF_NUMS];
+ unsigned int nr;
+static inline void INIT_DMAMAP_CACHE(struct cvp_dmamap_cache *cache)
+ mutex_init(&cache->lock);
+ cache->usage_bitmap = 0;
+ cache->nr = 0;
+static inline void DEINIT_DMAMAP_CACHE(struct cvp_dmamap_cache *cache)
+ mutex_destroy(&cache->lock);
+#define INPUT_FENCE_BITMASK 0x1
+#define OUTPUT_FENCE_BITMASK 0x2
+/* Track source of dma_buf allocator/owner */
+enum buffer_owner {
+ DRIVER, /* Allocated by KMD, for CPU driver */
+ CLIENT, /* Allocated by Client (DSP or CPU) */
+ DSP, /* Allocated by KMD, for DSP driver */
+ MAX_OWNER
+struct cvp_internal_buf {
+ s32 fd;
+ enum buffer_owner ownership;
+struct msm_cvp_frame {
+ struct cvp_internal_buf bufs[MAX_FRAME_BUFFER_NUMS];
+struct cvp_frame_bufs {
+ struct msm_cvp_smem smem[MAX_FRAME_BUFFER_NUMS];
+struct wncc_oob_buf {
+ u32 bitmap_idx;
+ struct eva_kmd_oob_wncc *buf;
+#define NUM_WNCC_BUFS 8
+struct cvp_oob_pool {
+ bool allocated;
+ u32 used_bitmap;
+ struct eva_kmd_oob_wncc *bufs[NUM_WNCC_BUFS];
+extern struct cvp_oob_pool wncc_buf_pool;
+void print_cvp_buffer(u32 tag, const char *str,
+ struct cvp_internal_buf *cbuf);
+ struct eva_kmd_buffer *cbuf);
+int print_smem(u32 tag, const char *str,
+ struct msm_cvp_smem *smem);
+/*Kernel DMA buffer and IOMMU mapping functions*/
+ void *res, struct msm_cvp_smem *smem);
+int msm_cvp_smem_free(struct msm_cvp_smem *smem);
+ unsigned int flags);
+ const char *str);
+struct dma_buf *msm_cvp_smem_get_dma_buf(int fd);
+void msm_cvp_smem_put_dma_buf(void *dma_buf);
+ enum smem_cache_ops cache_op,
+ unsigned long offset,
+ unsigned long size);
+int msm_cvp_map_ipcc_regs(u32 *iova);
+int msm_cvp_unmap_ipcc_regs(u32 iova);
+/* CVP driver internal buffer management functions*/
+ u32 buffer_size);
+int cvp_release_arp_buffers(struct msm_cvp_inst *inst);
+int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
+ struct eva_kmd_buffer *buf);
+int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst,
+int msm_cvp_map_buf_dsp_new(struct msm_cvp_inst *inst,
+ struct eva_kmd_buffer *buf,
+ int32_t pid,
+ uint32_t *iova);
+int msm_cvp_unmap_buf_dsp_new(struct msm_cvp_inst *inst,
+int msm_cvp_map_buf_wncc(struct msm_cvp_inst* inst,
+ struct eva_kmd_buffer* buf);
+int msm_cvp_unmap_buf_wncc(struct msm_cvp_inst* inst,
+ struct eva_kmd_hfi_packet* in_pkt);
+void msm_cvp_cache_operations(struct msm_cvp_smem *smem,
+ u32 type, u32 offset, u32 size);
+ unsigned int offset, unsigned int buf_num);
+void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid);
+int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst);
+ u32 secure_type);
+ struct cvp_internal_buf *buf);
@@ -0,0 +1,494 @@
+static bool __mmrm_client_check_scaling_supported(
+ struct mmrm_client_desc *client)
+#ifdef CVP_MMRM_ENABLED
+ return mmrm_client_check_scaling_supported(
+ client->client_type,
+ client->client_info.desc.client_domain);
+static struct mmrm_client *__mmrm_client_register(
+ return mmrm_client_register(client);
+static int __mmrm_client_deregister(struct mmrm_client *client)
+ return mmrm_client_deregister(client);
+static int __mmrm_client_set_value_in_range(struct mmrm_client *client,
+ struct mmrm_client_data *data,
+ struct mmrm_client_res_value *val)
+ return mmrm_client_set_value_in_range(client, data, val);
+int msm_cvp_mmrm_notifier_cb(
+ struct mmrm_client_notifier_data *notifier_data)
+ if (!notifier_data) {
+ dprintk(CVP_WARN, "%s Invalid notifier data: %pK\n",
+ __func__, notifier_data);
+ if (notifier_data->cb_type == MMRM_CLIENT_RESOURCE_VALUE_CHANGE) {
+ struct iris_hfi_device *dev = notifier_data->pvt_data;
+ "%s: Clock %s throttled from %ld to %ld \n",
+ __func__, dev->mmrm_desc.client_info.desc.name,
+ notifier_data->cb_data.val_chng.old_val,
+ notifier_data->cb_data.val_chng.new_val);
+ /*TODO: if need further handling to notify eva client */
+ dprintk(CVP_WARN, "%s Invalid cb type: %d\n",
+ __func__, notifier_data->cb_type);
+int msm_cvp_set_clocks(struct msm_cvp_core *core)
+ if (!core || !core->dev_ops) {
+ dprintk(CVP_ERR, "%s Invalid args: %pK\n", __func__, core);
+ rc = call_hfi_op(ops_tbl, scale_clocks,
+ ops_tbl->hfi_device_data, core->curr_freq);
+int msm_cvp_mmrm_register(struct iris_hfi_device *device)
+ struct clock_info *cl = NULL;
+ char *name;
+ bool isSupport;
+ name = (char *)device->mmrm_desc.client_info.desc.name;
+ device->mmrm_cvp = NULL;
+ device->mmrm_desc.client_type = MMRM_CLIENT_CLOCK;
+ device->mmrm_desc.priority = MMRM_CLIENT_PRIOR_LOW;
+ device->mmrm_desc.pvt_data = device;
+ device->mmrm_desc.notifier_callback_fn = msm_cvp_mmrm_notifier_cb;
+ device->mmrm_desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_CVP;
+ iris_hfi_for_each_clock(device, cl) {
+ if (cl->has_scaling) { /* only clk source enabled in dtsi */
+ device->mmrm_desc.client_info.desc.clk = cl->clk;
+ device->mmrm_desc.client_info.desc.client_id = cl->clk_id;
+ strlcpy(name, cl->name,
+ sizeof(device->mmrm_desc.client_info.desc.name));
+ isSupport = __mmrm_client_check_scaling_supported(&(device->mmrm_desc));
+ if (!isSupport) {
+ dprintk(CVP_PWR, "%s: mmrm not supported, flag: %d\n",
+ __func__, isSupport);
+ "%s: Register for %s, clk_id %d\n",
+ __func__, device->mmrm_desc.client_info.desc.name,
+ device->mmrm_desc.client_info.desc.client_id);
+ device->mmrm_cvp = __mmrm_client_register(&(device->mmrm_desc));
+ if (device->mmrm_cvp == NULL) {
+ "%s: Failed mmrm_client_register with mmrm_cvp: %pK\n",
+ __func__, device->mmrm_cvp);
+ rc = -ENOENT;
+ "%s: mmrm_client_register done: %pK, type:%d, uid:%ld\n",
+ __func__, device->mmrm_cvp,
+ device->mmrm_cvp->client_type,
+ device->mmrm_cvp->client_uid);
+int msm_cvp_mmrm_deregister(struct iris_hfi_device *device)
+ "%s invalid args: device %pK \n",
+ __func__, device);
+ if (!device->mmrm_cvp) { // when mmrm not supported
+ "%s device->mmrm_cvp not initialized \n",
+ /* set clk value to 0 before deregister */
+ if ((cl->has_scaling) && (__clk_is_enabled(cl->clk))) {
+ // set min freq and cur freq to 0;
+ rc = msm_cvp_mmrm_set_value_in_range(device,
+ 0, 0);
+ "%s Failed set clock %s: %d\n",
+ __func__, cl->name, rc);
+ rc = __mmrm_client_deregister(device->mmrm_cvp);
+ "%s: Failed mmrm_client_deregister with rc: %d\n",
+ device->mmrm_cvp = NULL;
+int msm_cvp_mmrm_set_value_in_range(struct iris_hfi_device *device,
+ u32 freq_min, u32 freq_cur)
+ struct mmrm_client_res_value val;
+ struct mmrm_client_data data;
+ "%s: set clock rate for mmrm_cvp: %pK, type :%d, uid: %ld\n",
+ device->mmrm_cvp->client_type, device->mmrm_cvp->client_uid);
+ val.min = freq_min;
+ val.cur = freq_cur;
+ data.num_hw_blocks = 1;
+ data.flags = 0; /* Not MMRM_CLIENT_DATA_FLAG_RESERVE_ONLY */
+ "%s: set clock rate to min %u cur %u: %d\n",
+ __func__, val.min, val.cur, rc);
+ rc = __mmrm_client_set_value_in_range(device->mmrm_cvp, &data, &val);
+ "%s: Failed to set clock rate to min %u cur %u: %d\n",
+int msm_cvp_set_clocks_impl(struct iris_hfi_device *device, u32 freq)
+ int fsrc2clk = 3;
+ // ratio factor for clock source : clk
+ u32 freq_min = device->res->allowed_clks_tbl[0].clock_rate * fsrc2clk;
+ dprintk(CVP_PWR, "%s: entering with freq : %ld\n", __func__, freq);
+ if (cl->has_scaling) {/* has_scaling */
+ device->clk_freq = freq;
+ if (msm_cvp_clock_voting)
+ freq = msm_cvp_clock_voting;
+ freq = freq * fsrc2clk;
+ "%s: clock source rate set to: %ld\n",
+ __func__, freq);
+ if (device->mmrm_cvp != NULL) {
+ /* min freq : 1st element value in the table */
+ freq_min, freq);
+ "Failed set clock %s: %d\n",
+ cl->name, rc);
+ "%s: set clock with clk_set_rate\n",
+ rc = clk_set_rate(cl->clk, freq);
+ "Failed set clock %u %s: %d\n",
+ freq, cl->name, rc);
+ dprintk(CVP_PWR, "Scaling clock %s to %u\n",
+ cl->name, freq);
+int msm_cvp_scale_clocks(struct iris_hfi_device *device)
+ struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+ u32 rate = 0;
+ allowed_clks_tbl = device->res->allowed_clks_tbl;
+ rate = device->clk_freq ? device->clk_freq :
+ allowed_clks_tbl[0].clock_rate;
+ dprintk(CVP_PWR, "%s: scale clock rate %d\n", __func__, rate);
+ rc = msm_cvp_set_clocks_impl(device, rate);
+int msm_cvp_prepare_enable_clk(struct iris_hfi_device *device,
+ if (strcmp(cl->name, name))
+ * For the clocks we control, set the rate prior to preparing
+ * them. Since we don't really have a load at this point,
+ * scale it to the lowest frequency possible
+ if (!cl->clk) {
+ dprintk(CVP_PWR, "%s %s already enabled by framework",
+ __func__, cl->name);
+ if (cl->has_scaling) {
+ clk_set_rate(cl->clk,
+ clk_round_rate(cl->clk, 0));
+ rc = clk_prepare_enable(cl->clk);
+ dprintk(CVP_ERR, "Failed to enable clock %s\n",
+ cl->name);
+ if (!__clk_is_enabled(cl->clk)) {
+ dprintk(CVP_ERR, "%s: clock %s not enabled\n",
+ clk_disable_unprepare(cl->clk);
+ dprintk(CVP_PWR, "Clock: %s prepared and enabled\n",
+ dprintk(CVP_ERR, "%s clock %s not found\n", __func__, name);
+int msm_cvp_disable_unprepare_clk(struct iris_hfi_device *device,
+ iris_hfi_for_each_clock_reverse(device, cl) {
+ dprintk(CVP_PWR, "%s %s always enabled by framework",
+ dprintk(CVP_PWR, "Clock: %s disable and unprepare\n",
+int msm_cvp_init_clocks(struct iris_hfi_device *device)
+ dprintk(CVP_PWR, "%s: scalable? %d, count %d\n",
+ cl->name, cl->has_scaling, cl->count);
+ cl->clk = clk_get(&device->res->pdev->dev, cl->name);
+ if (IS_ERR(cl->clk)) {
+ rc = PTR_ERR(cl->clk);
+ "Failed to get clock: %s, rc %d\n",
+ cl->clk = NULL;
+ goto err_clk_get;
+ device->clk_freq = 0;
+err_clk_get:
+void msm_cvp_deinit_clocks(struct iris_hfi_device *device)
+ if (cl->clk) {
+ clk_put(cl->clk);
+int msm_cvp_set_bw(struct msm_cvp_core *core, struct bus_info *bus, unsigned long bw)
+ rc = call_hfi_op(ops_tbl, vote_bus, ops_tbl->hfi_device_data, bus, bw);
+int cvp_set_bw(struct bus_info *bus, unsigned long bw)
+ if (!bus->client)
+ dprintk(CVP_PWR, "bus->name = %s to bw = %u\n",
+ bus->name, bw);
+ rc = icc_set_bw(bus->client, bw, 0);
+ dprintk(CVP_ERR, "Failed voting bus %s to ab %u\n",
@@ -0,0 +1,28 @@
+#ifndef _MSM_CVP_CLOCKS_H_
+#define _MSM_CVP_CLOCKS_H_
+int msm_cvp_set_clocks(struct msm_cvp_core *core);
+int msm_cvp_mmrm_register(struct iris_hfi_device *device);
+int msm_cvp_mmrm_deregister(struct iris_hfi_device *device);
+ u32 freq_min, u32 freq_cur);
+int msm_cvp_set_clocks_impl(struct iris_hfi_device *device, u32 freq);
+int msm_cvp_scale_clocks(struct iris_hfi_device *device);
+int msm_cvp_init_clocks(struct iris_hfi_device *device);
+void msm_cvp_deinit_clocks(struct iris_hfi_device *device);
+int msm_cvp_set_bw(struct msm_cvp_core *core, struct bus_info *bus, unsigned long bw);
+int cvp_set_bw(struct bus_info *bus, unsigned long bw);
@@ -0,0 +1,1431 @@
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <asm/div64.h>
+#define IS_ALREADY_IN_STATE(__p, __d) (\
+ (__p >= __d)\
+static void handle_session_error(enum hal_command_response cmd, void *data);
+static void dump_hfi_queue(struct iris_hfi_device *device)
+ struct cvp_iface_q_info *qinfo;
+ u32 *read_ptr, read_idx;
+ dprintk(CVP_ERR, "HFI queues in order of cmd(rd, wr), msg and dbg:\n");
+ * mb() to ensure driver reads the updated header values from
+ * main memory.
+ for (i = 0; i <= CVP_IFACEQ_DBGQ_IDX; i++) {
+ qinfo = &device->iface_queues[i];
+ queue = (struct cvp_hfi_queue_header *)qinfo->q_hdr;
+ dprintk(CVP_ERR, "HFI queue not init, fail to dump\n");
+ dprintk(CVP_ERR, "queue details: r:w %d:%d r:t %d %d\n",
+ queue->qhdr_read_idx, queue->qhdr_write_idx,
+ queue->qhdr_rx_req, queue->qhdr_tx_req);
+ if (queue->qhdr_read_idx != queue->qhdr_write_idx) {
+ "queue payload: %x %x %x %x %x %x %x %x %x\n",
+ read_ptr[0], read_ptr[1], read_ptr[2],
+ read_ptr[3], read_ptr[4], read_ptr[5],
+ read_ptr[6], read_ptr[7], read_ptr[8]);
+void print_hfi_queue_info(struct cvp_hfi_ops *ops_tbl)
+ if (ops_tbl && ops_tbl->hfi_device_data) {
+ call_hfi_op(ops_tbl, flush_debug_queue, ops_tbl->hfi_device_data);
+ dump_hfi_queue(ops_tbl->hfi_device_data);
+static void handle_sys_init_done(enum hal_command_response cmd, void *data)
+ struct msm_cvp_cb_cmd_done *response = data;
+ struct cvp_hal_sys_init_done *sys_init_msg;
+ if (!IS_HAL_SYS_CMD(cmd)) {
+ dprintk(CVP_ERR, "%s - invalid cmd\n", __func__);
+ index = SYS_MSG_INDEX(cmd);
+ if (!response) {
+ "Failed to get valid response for sys init\n");
+ dprintk(CVP_ERR, "Wrong device_id received\n");
+ sys_init_msg = &response->data.sys_init_done;
+ if (!sys_init_msg) {
+ dprintk(CVP_ERR, "sys_init_done message not proper\n");
+ /* This should come from sys_init_done */
+ core->resources.max_inst_count =
+ sys_init_msg->max_sessions_supported ?
+ min_t(u32, sys_init_msg->max_sessions_supported,
+ MAX_SUPPORTED_INSTANCES) : MAX_SUPPORTED_INSTANCES;
+ core->resources.max_secure_inst_count =
+ core->resources.max_secure_inst_count ?
+ core->resources.max_secure_inst_count :
+ core->resources.max_inst_count;
+ memcpy(core->capabilities, sys_init_msg->capabilities,
+ sys_init_msg->codec_count * sizeof(struct msm_cvp_capability));
+ "%s: max_inst_count %d, max_secure_inst_count %d\n",
+ __func__, core->resources.max_inst_count,
+ core->resources.max_secure_inst_count);
+ complete(&(core->completions[index]));
+static void put_inst_helper(struct kref *kref)
+ if (!kref)
+ inst = container_of(kref,
+ struct msm_cvp_inst, kref);
+ msm_cvp_destroy(inst);
+void cvp_put_inst(struct msm_cvp_inst *inst)
+ if (!inst || (kref_read(&inst->kref) < 1)) {
+ dprintk(CVP_ERR, "Invalid session %llx\n", inst);
+ WARN_ON(true);
+ kref_put(&inst->kref, put_inst_helper);
+struct msm_cvp_inst *cvp_get_inst(struct msm_cvp_core *core,
+ void *session_id)
+ bool matches = false;
+ * This is as good as !list_empty(&inst->list), but at this point
+ * we don't really know if inst was kfree'd via close syscall before
+ * hardware could respond. So manually walk thru the list of active
+ * sessions
+ if (inst == session_id) {
+ * Even if the instance is valid, we really shouldn't
+ * be receiving or handling callbacks when we've deleted
+ * our session with HFI
+ matches = !!inst->session;
+ * kref_* is atomic_int backed, so no need for inst->lock. But we can
+ * always acquire inst->lock and release it in cvp_put_inst
+ * for a stronger locking system.
+ inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
+struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,
+ s = cvp_get_inst(core, session_id);
+ dprintk(CVP_WARN, "%s session doesn't exit\n", __func__);
+ ops_tbl = s->core->dev_ops;
+ rc = call_hfi_op(ops_tbl, validate_session, s->session, __func__);
+ s = NULL;
+ return s;
+static void handle_session_set_buf_done(enum hal_command_response cmd,
+ void *data)
+ dprintk(CVP_ERR, "Invalid set_buf_done response\n");
+ inst = cvp_get_inst(cvp_driver->cvp_core, response->session_id);
+ dprintk(CVP_WARN, "set_buf_done has an inactive session\n");
+ if (response->status) {
+ "set ARP buffer error from FW : %#x\n",
+ response->status);
+ if (IS_HAL_SESSION_CMD(cmd))
+ complete(&inst->completions[SESSION_MSG_INDEX(cmd)]);
+ dprintk(CVP_ERR, "set_buf_done: invalid cmd: %d\n", cmd);
+static void handle_session_release_buf_done(enum hal_command_response cmd,
+ struct list_head *ptr, *next;
+ u32 buf_found = false;
+ u32 address;
+ dprintk(CVP_ERR, "Invalid release_buf_done response\n");
+ "%s: Got a response for an inactive session\n",
+ address = response->data.buffer_addr;
+ if (address == buf->smem->device_addr + buf->offset) {
+ dprintk(CVP_SESS, "releasing persist: %#x\n",
+ buf->smem->device_addr);
+ buf_found = true;
+ if (response->status)
+ dprintk(CVP_ERR, "HFI release persist buf err 0x%x\n",
+ inst->error_code = response->status;
+ dprintk(CVP_ERR, "Invalid inst cmd response: %d\n", cmd);
+static void handle_sys_release_res_done(
+ enum hal_command_response cmd, void *data)
+ complete(&core->completions[
+ SYS_MSG_INDEX(HAL_SYS_RELEASE_RESOURCE_DONE)]);
+void change_cvp_inst_state(struct msm_cvp_inst *inst, enum instance_state state)
+ dprintk(CVP_ERR, "Invalid parameter %s\n", __func__);
+ "Inst: %pK is in bad state can't change state to %d\n",
+ inst, state);
+ dprintk(CVP_SESS, "Moved inst: %pK from state: %d to state: %d\n",
+ inst, inst->state, state);
+ inst->state = state;
+static int signal_session_msg_receipt(enum hal_command_response cmd,
+ struct msm_cvp_inst *inst)
+ dprintk(CVP_ERR, "Invalid(%pK) instance id\n", inst);
+ if (IS_HAL_SESSION_CMD(cmd)) {
+int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
+ enum hal_command_response cmd)
+ if (!IS_HAL_SESSION_CMD(cmd)) {
+ ops_tbl = (struct cvp_hfi_ops *)(inst->core->dev_ops);
+ rc = wait_for_completion_timeout(
+ &inst->completions[SESSION_MSG_INDEX(cmd)],
+ inst->core->resources.msm_cvp_hw_rsp_timeout));
+ dprintk(CVP_WARN, "Wait interrupted or timed out: %d\n",
+ SESSION_MSG_INDEX(cmd));
+ if (inst->state != MSM_CVP_CORE_INVALID)
+ print_hfi_queue_info(ops_tbl);
+ } else if (inst->state == MSM_CVP_CORE_INVALID) {
+ rc = inst->error_code;
+ inst->prev_error_code = inst->error_code;
+ inst->error_code = CVP_ERR_NONE;
+static int wait_for_state(struct msm_cvp_inst *inst,
+ enum instance_state flipped_state,
+ enum instance_state desired_state,
+ enum hal_command_response hal_cmd)
+ if (IS_ALREADY_IN_STATE(flipped_state, desired_state)) {
+ dprintk(CVP_INFO, "inst: %pK is already in state: %d\n",
+ inst, inst->state);
+ goto err_same_state;
+ dprintk(CVP_SESS, "Waiting for hal_cmd: %d\n", hal_cmd);
+ rc = wait_for_sess_signal_receipt(inst, hal_cmd);
+ change_cvp_inst_state(inst, desired_state);
+err_same_state:
+static void handle_session_init_done(enum hal_command_response cmd, void *data)
+ "Failed to get valid response for session init\n");
+ dprintk(CVP_WARN, "%s:Got a response for an inactive session %#x\n",
+ __func__, response->session_id);
+ list_for_each_entry(inst, &core->instances, list)
+ cvp_print_inst(CVP_WARN, inst);
+ "Session %#x init err response from FW : 0x%x\n",
+ hash32_ptr(inst->session), response->status);
+ dprintk(CVP_SESS, "%s: cvp session %#x\n", __func__,
+ hash32_ptr(inst->session));
+ signal_session_msg_receipt(cmd, inst);
+static void handle_event_change(enum hal_command_response cmd, void *data)
+ dprintk(CVP_WARN, "%s is not supported on CVP!\n", __func__);
+static void handle_session_dump_notify(enum hal_command_response cmd,
+ "Failed to get valid response during dump notify\n");
+ dprintk(CVP_WARN, "%s:Got a response for an inactive session\n",
+ inst->event_handler.event = CVP_DUMP_EVENT;
+ wake_up_all(&inst->event_handler.wq);
+ dprintk(CVP_ERR,"Event_handler woken up\n");
+static void handle_release_res_done(enum hal_command_response cmd, void *data)
+ "Failed to get valid response for release resource\n");
+static void handle_session_ctrl(enum hal_command_response cmd, void *data)
+ dprintk(CVP_ERR, "HFI sess ctrl err 0x%x HAL cmd %d\n",
+ response->status, cmd);
+static void handle_session_error(enum hal_command_response cmd, void *data)
+ //unsigned long flags = 0;
+ //int i;
+ "Failed to get valid response for session error\n");
+ dprintk(CVP_WARN, "%s: response for an inactive session\n",
+ dprintk(CVP_ERR, "Sess error 0x%x received for inst %pK sess %x\n",
+ response->status, inst, hash32_ptr(inst->session));
+ //if (inst->state != MSM_CVP_CORE_INVALID) {
+ // change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+ // if (cvp_clean_session_queues(inst))
+ // dprintk(CVP_WARN, "Failed to clean sess queues\n");
+ // for (i = 0; i < ARRAY_SIZE(inst->completions); i++)
+ // complete(&inst->completions[i]);
+ // spin_lock_irqsave(&inst->event_handler.lock, flags);
+ // inst->event_handler.event = CVP_SSR_EVENT;
+ // spin_unlock_irqrestore(
+ // &inst->event_handler.lock, flags);
+ // wake_up_all(&inst->event_handler.wq);
+ //}
+void handle_sys_error(enum hal_command_response cmd, void *data)
+ struct iris_hfi_device *hfi_device;
+ enum cvp_core_state cur_state;
+ "Failed to get valid response for sys error\n");
+ "Got SYS_ERR but unable to identify core\n");
+ core->ssr_count++;
+ if (core->state == CVP_CORE_UNINIT) {
+ "%s: Core %pK already moved to state %d\n",
+ __func__, core, core->state);
+ cur_state = core->state;
+ dprintk(CVP_WARN, "SYS_ERROR from core %pK cmd %x total: %d\n",
+ core, cmd, core->ssr_count);
+ hfi_device = ops_tbl->hfi_device_data;
+ if (hfi_device->error == CVP_ERR_NOC_ERROR) {
+ dprintk(CVP_WARN, "Got NOC error");
+ msm_cvp_noc_error_info(core);
+ if (inst->state != MSM_CVP_CORE_INVALID) {
+ change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+ if (cvp_clean_session_queues(inst))
+ dprintk(CVP_ERR, "Failed to clean fences\n");
+ for (i = 0; i < ARRAY_SIZE(inst->completions); i++)
+ complete(&inst->completions[i]);
+ inst->event_handler.event = CVP_SSR_EVENT;
+ spin_unlock_irqrestore(
+ &inst->event_handler.lock, flags);
+ if (!core->trigger_ssr)
+ if (hfi_device->error != CVP_ERR_NOC_ERROR)
+ msm_cvp_print_inst_bufs(inst, false);
+ /* handle the hw error before core released to get full debug info */
+ msm_cvp_handle_hw_error(core);
+ dprintk(CVP_CORE, "Calling core_release\n");
+ rc = call_hfi_op(ops_tbl, core_release, ops_tbl->hfi_device_data);
+ dprintk(CVP_ERR, "core_release failed\n");
+ core->state = cur_state;
+ dprintk(CVP_WARN, "SYS_ERROR handled.\n");
+ BUG_ON(core->resources.fatal_ssr);
+void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst)
+ dprintk(CVP_ERR, "%s invalid params\n", __func__);
+ if (!inst->session || inst->session == (void *)0xdeadbeef) {
+ dprintk(CVP_SESS, "%s: inst %pK session already cleaned\n",
+ dprintk(CVP_SESS, "%s: inst %pK\n", __func__, inst);
+ rc = call_hfi_op(ops_tbl, session_clean,
+ "Session clean failed :%pK\n", inst);
+ inst->session = NULL;
+static void handle_session_close(enum hal_command_response cmd, void *data)
+ "Failed to get valid response for session close\n");
+ dprintk(CVP_WARN, "%s: response for an inactive session %#x\n",
+ dprintk(CVP_ERR, "HFI sess close fail 0x%x\n",
+ show_stats(inst);
+void cvp_handle_cmd_response(enum hal_command_response cmd, void *data)
+ dprintk(CVP_HFI, "Command response = %d\n", cmd);
+ switch (cmd) {
+ handle_sys_init_done(cmd, data);
+ handle_sys_release_res_done(cmd, data);
+ handle_session_init_done(cmd, data);
+ case HAL_SESSION_RELEASE_RESOURCE_DONE:
+ handle_release_res_done(cmd, data);
+ handle_session_close(cmd, data);
+ handle_event_change(cmd, data);
+ handle_session_ctrl(cmd, data);
+ case HAL_SYS_WATCHDOG_TIMEOUT:
+ handle_sys_error(cmd, data);
+ handle_session_error(cmd, data);
+ handle_session_set_buf_done(cmd, data);
+ handle_session_release_buf_done(cmd, data);
+ handle_session_dump_notify(cmd, data);
+ dprintk(CVP_HFI, "response unhandled: %d\n", cmd);
+static inline enum msm_cvp_thermal_level msm_comm_cvp_thermal_level(int level)
+ switch (level) {
+ return CVP_THERMAL_NORMAL;
+ return CVP_THERMAL_LOW;
+ return CVP_THERMAL_HIGH;
+ return CVP_THERMAL_CRITICAL;
+static int msm_comm_session_abort(struct msm_cvp_inst *inst)
+ int rc = 0, abort_completion = 0;
+ if (1)
+ /* Activate code below for Watchdog timeout testing */
+ abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
+ dprintk(CVP_WARN, "%s: inst %pK session %x\n", __func__,
+ rc = call_hfi_op(ops_tbl, session_abort, (void *)inst->session);
+ "%s session_abort failed rc: %d\n", __func__, rc);
+ &inst->completions[abort_completion],
+ dprintk(CVP_ERR, "%s: inst %pK session %x abort timed out\n",
+ __func__, inst, hash32_ptr(inst->session));
+ msm_cvp_comm_generate_sys_error(inst);
+void msm_cvp_comm_handle_thermal_event(void)
+ dprintk(CVP_WARN, "deprecated %s called\n", __func__);
+int msm_cvp_comm_check_core_init(struct msm_cvp_core *core)
+ if (core->state >= CVP_CORE_INIT_DONE) {
+ dprintk(CVP_INFO, "CVP core: is already in state: %d\n",
+ core->state);
+ dprintk(CVP_CORE, "Waiting for SYS_INIT_DONE\n");
+ &core->completions[SYS_MSG_INDEX(HAL_SYS_INIT_DONE)],
+ msecs_to_jiffies(core->resources.msm_cvp_hw_rsp_timeout));
+ dprintk(CVP_ERR, "%s: Wait interrupted or timed out: %d\n",
+ __func__, SYS_MSG_INDEX(HAL_SYS_INIT_DONE));
+ core->state = CVP_CORE_INIT_DONE;
+ dprintk(CVP_CORE, "SYS_INIT_DONE!!!\n");
+static int msm_comm_init_core_done(struct msm_cvp_inst *inst)
+ rc = msm_cvp_comm_check_core_init(inst->core);
+ dprintk(CVP_ERR, "%s - failed to initialize core\n", __func__);
+ change_cvp_inst_state(inst, MSM_CVP_CORE_INIT_DONE);
+static int msm_comm_init_core(struct msm_cvp_inst *inst)
+ if (!inst || !inst->core || !inst->core->dev_ops)
+ if (core->state >= CVP_CORE_INIT) {
+ dprintk(CVP_CORE, "CVP core: is already in state: %d\n",
+ goto core_already_inited;
+ if (!core->capabilities) {
+ core->capabilities = kcalloc(CVP_MAX_SESSIONS,
+ sizeof(struct msm_cvp_capability), GFP_KERNEL);
+ "%s: failed to allocate capabilities\n",
+ goto fail_cap_alloc;
+ "%s: capabilities memory is expected to be freed\n",
+ dprintk(CVP_CORE, "%s: core %pK\n", __func__, core);
+ rc = call_hfi_op(ops_tbl, core_init, ops_tbl->hfi_device_data);
+ goto fail_core_init;
+ core->state = CVP_CORE_INIT;
+ core->trigger_ssr = false;
+core_already_inited:
+ change_cvp_inst_state(inst, MSM_CVP_CORE_INIT);
+fail_core_init:
+ kfree(core->capabilities);
+fail_cap_alloc:
+ core->capabilities = NULL;
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst)
+ dprintk(CVP_ERR, "%s invalid parameters\n", __func__);
+ change_cvp_inst_state(inst, MSM_CVP_CORE_UNINIT);
+static int msm_comm_session_init_done(int flipped_state,
+ dprintk(CVP_SESS, "inst %pK: waiting for session init done\n", inst);
+ rc = wait_for_state(inst, flipped_state, MSM_CVP_OPEN_DONE,
+ HAL_SESSION_INIT_DONE);
+ dprintk(CVP_ERR, "Session init failed for inst %pK\n", inst);
+static int msm_comm_session_init(int flipped_state,
+ if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_OPEN)) {
+ rc = call_hfi_op(ops_tbl, session_init, ops_tbl->hfi_device_data,
+ inst, &inst->session);
+ if (rc || !inst->session) {
+ "Failed to call session init for: %pK, %pK, %d\n",
+ inst->core->dev_ops, inst, inst->session_type);
+ change_cvp_inst_state(inst, MSM_CVP_OPEN);
+static int msm_comm_session_close(int flipped_state,
+ if (IS_ALREADY_IN_STATE(flipped_state, MSM_CVP_CLOSE)) {
+ "inst: %pK is already in state: %d\n",
+ rc = call_hfi_op(ops_tbl, session_end, (void *) inst->session);
+ "Failed to send close\n");
+ change_cvp_inst_state(inst, MSM_CVP_CLOSE);
+int msm_cvp_comm_suspend(void)
+ "%s: Failed to find cvp core\n", __func__);
+ ops_tbl = (struct cvp_hfi_ops *)core->dev_ops;
+ dprintk(CVP_ERR, "%s Invalid device handle\n", __func__);
+ rc = call_hfi_op(ops_tbl, suspend, ops_tbl->hfi_device_data);
+static int get_flipped_state(int present_state, int desired_state)
+ int flipped_state;
+ if (present_state == MSM_CVP_CORE_INIT_DONE && desired_state > MSM_CVP_CLOSE)
+ flipped_state = MSM_CVP_CORE_UNINIT;
+ else if (present_state == MSM_CVP_CORE_INVALID)
+ flipped_state = MSM_CVP_CLOSE;
+ flipped_state = present_state;
+ return flipped_state;
+static char state_names[MSM_CVP_CORE_INVALID + 1][32] = {
+ "Invlid entry",
+ "CORE_UNINIT_DONE",
+ "CORE_INIT",
+ "CORE_INIT_DONE",
+ "OPEN",
+ "OPEN_DONE",
+ "CLOSE",
+ "CLOSE_DONE",
+ "CORE_UNINIT",
+ "CORE_INVALID"
+int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state)
+ dprintk(CVP_ERR, "%s: invalid params %pK", __func__, inst);
+ mutex_lock(&inst->sync_lock);
+ if (inst->state == MSM_CVP_CORE_INVALID &&
+ core->state == CVP_CORE_UNINIT) {
+ dprintk(CVP_ERR, "%s: inst %pK & core are in invalid\n",
+ mutex_unlock(&inst->sync_lock);
+ flipped_state = get_flipped_state(inst->state, state);
+ "inst: %pK (%#x) cur_state %s dest_state %s flipped_state = %s\n",
+ inst, hash32_ptr(inst->session), state_names[inst->state],
+ state_names[state], state_names[flipped_state]);
+ switch (flipped_state) {
+ case MSM_CVP_CORE_UNINIT_DONE:
+ case MSM_CVP_CORE_INIT:
+ rc = msm_comm_init_core(inst);
+ if (rc || state <= get_flipped_state(inst->state, state))
+ /* defined in linux/compiler_attributes.h */
+ fallthrough;
+ case MSM_CVP_CORE_INIT_DONE:
+ rc = msm_comm_init_core_done(inst);
+ case MSM_CVP_OPEN:
+ rc = msm_comm_session_init(flipped_state, inst);
+ case MSM_CVP_OPEN_DONE:
+ rc = msm_comm_session_init_done(flipped_state, inst);
+ case MSM_CVP_CLOSE:
+ dprintk(CVP_INFO, "to CVP_CLOSE state\n");
+ rc = msm_comm_session_close(flipped_state, inst);
+ case MSM_CVP_CLOSE_DONE:
+ dprintk(CVP_INFO, "to CVP_CLOSE_DONE state\n");
+ rc = wait_for_state(inst, flipped_state, MSM_CVP_CLOSE_DONE,
+ HAL_SESSION_END_DONE);
+ msm_cvp_comm_session_clean(inst);
+ case MSM_CVP_CORE_UNINIT:
+ case MSM_CVP_CORE_INVALID:
+ dprintk(CVP_INFO, "Sending core uninit\n");
+ rc = msm_cvp_deinit_core(inst);
+ dprintk(CVP_ERR, "State not recognized\n");
+ if (rc == -ETIMEDOUT) {
+ "Timedout move from state: %s to %s\n",
+ state_names[inst->state],
+ state_names[state]);
+int msm_cvp_noc_error_info(struct msm_cvp_core *core)
+ static u32 last_fault_count = 0;
+ dprintk(CVP_WARN, "%s: Invalid parameters: %pK\n",
+ __func__, core);
+ if (!core->smmu_fault_count ||
+ core->smmu_fault_count == last_fault_count)
+ last_fault_count = core->smmu_fault_count;
+ dprintk(CVP_ERR, "cvp ssr count %d %d %d\n", core->ssr_count,
+ core->resources.max_ssr_allowed,
+ core->smmu_fault_count);
+ call_hfi_op(ops_tbl, noc_error_info, ops_tbl->hfi_device_data);
+ if (core->smmu_fault_count >= core->resources.max_ssr_allowed)
+ BUG_ON(!core->resources.non_fatal_pagefaults);
+int msm_cvp_trigger_ssr(struct msm_cvp_core *core,
+ dprintk(CVP_WARN, "%s: Invalid parameters\n", __func__);
+ core->ssr_type = type;
+ schedule_work(&core->ssr_work);
+void msm_cvp_ssr_handler(struct work_struct *work)
+ if (!work)
+ core = container_of(work, struct msm_cvp_core, ssr_work);
+ if (core->ssr_type == SSR_SESSION_ABORT) {
+ struct msm_cvp_inst *inst = NULL, *s;
+ dprintk(CVP_ERR, "Session abort triggered\n");
+ "Session to abort: inst %#x ref %x\n",
+ inst, kref_read(&inst->kref));
+ if (inst != NULL) {
+ dprintk(CVP_WARN, "No active CVP session to abort\n");
+send_again:
+ if (core->state == CVP_CORE_INIT_DONE) {
+ dprintk(CVP_WARN, "%s: ssr type %d at %llu\n", __func__,
+ core->ssr_type, get_aon_time());
+ * In current implementation user-initiated SSR triggers
+ * a fatal error from hardware. However, there is no way
+ * to know if fatal error is due to SSR or not. Handle
+ * user SSR as non-fatal.
+ core->trigger_ssr = true;
+ rc = call_hfi_op(ops_tbl, core_trigger_ssr,
+ ops_tbl->hfi_device_data, core->ssr_type);
+ if (rc == -EAGAIN) {
+ dprintk(CVP_WARN, "Retry ssr\n");
+ goto send_again;
+ dprintk(CVP_ERR, "%s: trigger_ssr failed\n",
+ dprintk(CVP_WARN, "%s: cvp core %pK not initialized\n",
+void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst)
+ enum hal_command_response cmd = HAL_SYS_ERROR;
+ dprintk(CVP_ERR, "%s: invalid input parameters\n", __func__);
+ dprintk(CVP_WARN, "%s: inst %pK\n", __func__, inst);
+int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst)
+ } else if (!inst->session || inst->session == (void *)0xdeadbeef) {
+ dprintk(CVP_ERR, "%s: no session to kill for inst %pK\n",
+ dprintk(CVP_WARN, "%s: inst %pK, session %x state %d\n", __func__,
+ inst, hash32_ptr(inst->session), inst->state);
+ * We're forcibly killing the session from inside the driver. If FW is
+ * aware of the session, send session_abort so firmware can clean up and
+ * release the session; otherwise just tear the session down in the driver.
+ if (inst->state >= MSM_CVP_OPEN_DONE &&
+ inst->state < MSM_CVP_CLOSE_DONE) {
+ msm_comm_session_abort(inst);
+ if (inst->state >= MSM_CVP_CORE_UNINIT) {
+static int set_internal_buf_on_fw(struct msm_cvp_inst *inst,
+ struct msm_cvp_smem *handle)
+ if (!inst || !inst->core || !inst->core->dev_ops || !handle) {
+ dprintk(CVP_ERR, "%s - invalid params\n", __func__);
+ iova = handle->device_addr;
+ size = handle->size;
+ dprintk(CVP_SESS, "%s: allocated ARP buffer : %x\n", __func__, iova);
+ rc = call_hfi_op(ops_tbl, session_set_buffers,
+ (void *) inst->session, iova, size);
+ dprintk(CVP_ERR, "cvp_session_set_buffers failed\n");
+/* Set ARP buffer for CVP firmware to handle concurrency */
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst)
+ buf = cvp_allocate_arp_bufs(inst, ARP_BUF_SIZE);
+ goto error;
+ rc = set_internal_buf_on_fw(inst, buf->smem);
+ rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_SET_BUFFER_DONE);
+ dprintk(CVP_WARN, "wait for set_buffer_done timeout %d\n", rc);
+error:
+ if (rc != -ENOMEM)
+ cvp_release_arp_buffers(inst);
+bool is_cvp_inst_valid(struct msm_cvp_inst *inst)
+ struct msm_cvp_inst *sess;
+ list_for_each_entry(sess, &core->instances, list) {
+ if (inst == sess) {
+ if (kref_read(&inst->kref)) {
+int cvp_print_inst(u32 tag, struct msm_cvp_inst *inst)
+ dprintk(CVP_ERR, "%s invalid inst %pK\n", __func__, inst);
+ dprintk(tag, "%s inst stype %d %pK id = %#x ptype %#x prio %#x secure %#x kmask %#x dmask %#x, kref %#x state %#x\n",
+ inst->proc_name, inst->session_type, inst, hash32_ptr(inst->session),
+ inst->prop.type, inst->prop.priority, inst->prop.is_secure,
+ inst->prop.kernel_mask, inst->prop.dsp_mask,
+ kref_read(&inst->kref), inst->state);
@@ -0,0 +1,36 @@
+#ifndef _MSM_CVP_COMMON_H_
+#define _MSM_CVP_COMMON_H_
+void cvp_put_inst(struct msm_cvp_inst *inst);
+ void *session_id);
+bool is_cvp_inst_valid(struct msm_cvp_inst *inst);
+void cvp_change_inst_state(struct msm_cvp_inst *inst,
+ enum instance_state state);
+int msm_cvp_comm_try_state(struct msm_cvp_inst *inst, int state);
+int msm_cvp_deinit_core(struct msm_cvp_inst *inst);
+int msm_cvp_comm_suspend(void);
+void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst);
+int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst);
+void msm_cvp_comm_generate_sys_error(struct msm_cvp_inst *inst);
+void handle_sys_error(enum hal_command_response cmd, void *data);
+int msm_cvp_comm_smem_cache_operations(struct msm_cvp_inst *inst,
+ struct msm_cvp_smem *mem, enum smem_cache_ops cache_ops);
+int msm_cvp_comm_check_core_init(struct msm_cvp_core *core);
+ enum hal_command_response cmd);
+int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
+int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
+int msm_cvp_noc_error_info(struct msm_cvp_core *core);
+int cvp_print_inst(u32 tag, struct msm_cvp_inst *inst);
+unsigned long long get_aon_time(void);
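wait_for_sess_signal_receipt() and msm_cvp_comm_check_core_init() both follow the same completion handshake: the response handler signals a per-command completion and the caller blocks on it with the HW response timeout. A reduced sketch is shown below; the 2000 ms value stands in for msm_cvp_hw_rsp_timeout and the demo_* names are invented for illustration.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_session {
	struct completion done;	/* the driver keeps one per expected HAL response */
};

static void demo_session_init(struct demo_session *s)
{
	init_completion(&s->done);
}

/* Response/interrupt path: signal the waiter for this command */
static void demo_signal_receipt(struct demo_session *s)
{
	complete(&s->done);
}

/* Command path: block until the response arrives or the HW timeout expires */
static int demo_wait_for_receipt(struct demo_session *s)
{
	if (!wait_for_completion_timeout(&s->done, msecs_to_jiffies(2000)))
		return -ETIMEDOUT;
	return 0;
}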
@@ -0,0 +1,544 @@
+#define MAX_EVENTS 30
+#define NUM_CYCLES16X16_HCD_FRAME 95
+#define NUM_CYCLES16X16_DMM_FRAME 600
+#define NUM_CYCLES16X16_NCC_FRAME 400
+#define NUM_CYCLES16X16_DS_FRAME 80
+#define NUM_CYCLESFW_FRAME 1680000
+#define NUM_DMM_MAX_FEATURE_POINTS 500
+#define CYCLES_MARGIN_IN_POWEROF2 3
+static atomic_t nr_insts;
+void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags)
+ atomic_inc(&k->nr_objs);
+ return kmem_cache_zalloc(k->cache, flags);
+void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj)
+ atomic_dec(&k->nr_objs);
+ kmem_cache_free(k->cache, obj);
+int msm_cvp_poll(void *instance, struct file *filp,
+ struct poll_table_struct *wait)
+EXPORT_SYMBOL(msm_cvp_poll);
+int msm_cvp_private(void *cvp_inst, unsigned int cmd,
+ struct msm_cvp_inst *inst = (struct msm_cvp_inst *)cvp_inst;
+ rc = msm_cvp_handle_syscall(inst, arg);
+EXPORT_SYMBOL(msm_cvp_private);
+static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core,
+ u32 *instance_count)
+ u32 secure_instance_count = 0;
+ bool overload = false;
+ (*instance_count)++;
+ /* This flag is not updated yet for the current instance */
+ if (inst->flags & CVP_SECURE)
+ secure_instance_count++;
+ /* Instance count includes current instance as well. */
+ if ((*instance_count >= core->resources.max_inst_count) ||
+ (secure_instance_count >=
+ core->resources.max_secure_inst_count))
+ overload = true;
+ return overload;
+static int __init_session_queue(struct msm_cvp_inst *inst)
+ spin_lock_init(&inst->session_queue.lock);
+ INIT_LIST_HEAD(&inst->session_queue.msgs);
+ inst->session_queue.msg_count = 0;
+ init_waitqueue_head(&inst->session_queue.wq);
+ inst->session_queue.state = QUEUE_ACTIVE;
+static void __init_fence_queue(struct msm_cvp_inst *inst)
+ mutex_init(&inst->fence_cmd_queue.lock);
+ INIT_LIST_HEAD(&inst->fence_cmd_queue.wait_list);
+ INIT_LIST_HEAD(&inst->fence_cmd_queue.sched_list);
+ init_waitqueue_head(&inst->fence_cmd_queue.wq);
+ inst->fence_cmd_queue.state = QUEUE_ACTIVE;
+ inst->fence_cmd_queue.mode = OP_NORMAL;
+ spin_lock_init(&inst->session_queue_fence.lock);
+ INIT_LIST_HEAD(&inst->session_queue_fence.msgs);
+ inst->session_queue_fence.msg_count = 0;
+ init_waitqueue_head(&inst->session_queue_fence.wq);
+ inst->session_queue_fence.state = QUEUE_ACTIVE;
+static void __deinit_fence_queue(struct msm_cvp_inst *inst)
+ mutex_destroy(&inst->fence_cmd_queue.lock);
+ inst->fence_cmd_queue.state = QUEUE_INVALID;
+ inst->fence_cmd_queue.mode = OP_INVALID;
+static void __deinit_session_queue(struct msm_cvp_inst *inst)
+ struct cvp_session_msg *msg, *tmpmsg;
+ /* free all messages */
+ spin_lock(&inst->session_queue.lock);
+ list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
+ list_del_init(&msg->node);
+ inst->session_queue.state = QUEUE_INVALID;
+ spin_unlock(&inst->session_queue.lock);
+struct msm_cvp_inst *msm_cvp_open(int session_type, struct task_struct *task)
+ u32 instance_count;
+ dprintk(CVP_ERR, "%s CVP core not initialized\n", __func__);
+ goto err_invalid_core;
+ if (!msm_cvp_auto_pil && session_type == MSM_CVP_BOOT) {
+ dprintk(CVP_SESS, "Auto PIL disabled, bypass CVP init at boot");
+ core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
+ if (msm_cvp_check_for_inst_overload(core, &instance_count)) {
+ dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
+ cvp_print_inst(CVP_ERR, inst);
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ dprintk(CVP_ERR, "Failed to allocate memory\n");
+ pr_info(CVP_DBG_TAG "%s opening cvp instance: %pK type %d cnt %d\n",
+ "sess", task->comm, inst, session_type, instance_count);
+ mutex_init(&inst->sync_lock);
+ mutex_init(&inst->lock);
+ spin_lock_init(&inst->event_handler.lock);
+ INIT_MSM_CVP_LIST(&inst->persistbufs);
+ INIT_DMAMAP_CACHE(&inst->dma_cache);
+ INIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+ INIT_MSM_CVP_LIST(&inst->cvpwnccbufs);
+ INIT_MSM_CVP_LIST(&inst->frames);
+ inst->cvpwnccbufs_num = 0;
+ init_waitqueue_head(&inst->event_handler.wq);
+ kref_init(&inst->kref);
+ inst->session_type = session_type;
+ inst->state = MSM_CVP_CORE_UNINIT_DONE;
+ inst->core = core;
+ inst->clk_data.min_freq = 0;
+ inst->clk_data.curr_freq = 0;
+ inst->clk_data.ddr_bw = 0;
+ inst->clk_data.sys_cache_bw = 0;
+ inst->clk_data.bitrate = 0;
+ for (i = SESSION_MSG_INDEX(SESSION_MSG_START);
+ i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
+ init_completion(&inst->completions[i]);
+ msm_cvp_session_init(inst);
+ __init_fence_queue(inst);
+ list_add_tail(&inst->list, &core->instances);
+ atomic_inc(&nr_insts);
+ rc = __init_session_queue(inst);
+ rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_INIT_DONE);
+ "Failed to move cvp instance to init state\n");
+ inst->debugfs_root =
+ msm_cvp_debugfs_init_inst(inst, core->debugfs_root);
+ strlcpy(inst->proc_name, task->comm, TASK_COMM_LEN);
+ __deinit_session_queue(inst);
+ __deinit_fence_queue(inst);
+ list_del(&inst->list);
+ mutex_destroy(&inst->sync_lock);
+ mutex_destroy(&inst->lock);
+ DEINIT_MSM_CVP_LIST(&inst->persistbufs);
+ DEINIT_DMAMAP_CACHE(&inst->dma_cache);
+ DEINIT_MSM_CVP_LIST(&inst->cvpdspbufs);
+ DEINIT_MSM_CVP_LIST(&inst->cvpwnccbufs);
+ DEINIT_MSM_CVP_LIST(&inst->frames);
+ kfree(inst);
+ inst = NULL;
+err_invalid_core:
+EXPORT_SYMBOL(msm_cvp_open);
+static void msm_cvp_clean_sess_queue(struct msm_cvp_inst *inst,
+ struct cvp_session_queue *sq)
+ struct cvp_session_msg *mptr, *dummy;
+ u64 ktid = 0LL;
+check_again:
+ if (sq->msg_count && sq->state != QUEUE_ACTIVE) {
+ ktid = mptr->pkt.client_data.kdata;
+ if (ktid) {
+ msm_cvp_unmap_frame(inst, ktid);
+ cvp_kmem_cache_free(&cvp_driver->msg_cache, mptr);
+ ktid = 0LL;
+ goto check_again;
+static int msm_cvp_cleanup_instance(struct msm_cvp_inst *inst)
+ bool empty;
+ int rc, max_retries;
+ struct cvp_session_queue *sq, *sqf;
+ struct msm_cvp_inst *tmp;
+ sqf = &inst->session_queue_fence;
+ max_retries = inst->core->resources.msm_cvp_hw_rsp_timeout >> 5;
+ msm_cvp_session_queue_stop(inst);
+wait_dsp:
+ empty = list_empty(&inst->cvpdspbufs.list);
+ if (!empty && max_retries > 0) {
+ goto wait_dsp;
+ if (!empty) {
+ dprintk(CVP_WARN, "Failed sess %pK DSP frame pending\n", inst);
+ * A session is either a DSP session or a CPU session; it cannot have
+ * both DSP and frame buffers
+ goto stop_session;
+ max_retries = inst->core->resources.msm_cvp_hw_rsp_timeout >> 1;
+wait_frame:
+ empty = list_empty(&inst->frames.list);
+ msm_cvp_clean_sess_queue(inst, sqf);
+ msm_cvp_clean_sess_queue(inst, sq);
+ goto wait_frame;
+ "Failed to process frames before session %pK close\n",
+ inst);
+ list_for_each_entry(frame, &inst->frames.list, list)
+ dprintk(CVP_WARN, "Unprocessed frame %08x ktid %llu\n",
+ frame->pkt_type, frame->ktid);
+ inst->core->synx_ftbl->cvp_dump_fence_queue(inst);
+stop_session:
+ tmp = cvp_get_inst_validate(inst->core, inst);
+ if (!tmp) {
+ dprintk(CVP_ERR, "%s has a invalid session %llx\n",
+ goto release_arp;
+ /* STOP SESSION to avoid SMMU fault after releasing ARP */
+ dprintk(CVP_WARN, "%s: cannot stop session rc %d\n",
+ /* Failed to stop session; releasing ARP later may cause an SMMU fault */
+ dprintk(CVP_WARN, "%s: wait for sess_stop fail, rc %d\n",
+ /* Continue to release ARP anyway */
+release_arp:
+ cvp_put_inst(tmp);
+ if (cvp_release_arp_buffers(inst))
+ dprintk_rl(CVP_WARN,
+ "Failed to release persist buffers\n");
+ if (inst->core->resources.pm_qos.off_vote_cnt > 0)
+ inst->core->resources.pm_qos.off_vote_cnt--;
+ dprintk(CVP_INFO, "%s Unexpected pm_qos off vote %d\n",
+ __func__,
+ inst->core->resources.pm_qos.off_vote_cnt);
+int msm_cvp_destroy(struct msm_cvp_inst *inst)
+ if (inst->session_type == MSM_CVP_DSP) {
+ cvp_dsp_del_sess(inst->dsp_handle, inst);
+ inst->task = NULL;
+ /* Ensure no path has core->clk_lock and core->lock sequence */
+ /* inst->list lives in core->instances */
+ atomic_dec(&nr_insts);
+ msm_cvp_debugfs_deinit_inst(inst);
+ core->synx_ftbl->cvp_sess_deinit_synx(inst);
+ pr_info(CVP_DBG_TAG
+ "closed cvp instance: %pK session_id = %d type %d %d\n",
+ inst->proc_name, inst, hash32_ptr(inst->session),
+ inst->session_type, core->smem_leak_count);
+ inst->session = (void *)0xdeadbeef;
+ if (atomic_read(&inst->smem_count) > 0) {
+ dprintk(CVP_WARN, "Session closed with %d unmapped smems\n",
+ atomic_read(&inst->smem_count));
+ core->smem_leak_count += atomic_read(&inst->smem_count);
+ "sys-stat: nr_insts %d msgs %d, frames %d, bufs %d, smems %d\n",
+ atomic_read(&nr_insts),
+ atomic_read(&cvp_driver->msg_cache.nr_objs),
+ atomic_read(&cvp_driver->frame_cache.nr_objs),
+ atomic_read(&cvp_driver->buf_cache.nr_objs),
+ atomic_read(&cvp_driver->smem_cache.nr_objs));
+static void close_helper(struct kref *kref)
+ inst = container_of(kref, struct msm_cvp_inst, kref);
+int msm_cvp_close(void *instance)
+ struct msm_cvp_inst *inst = instance;
+ dprintk_rl(CVP_ERR, "%s: invalid params\n", __func__);
+ "to close instance: %pK session_id = %d type %d state %d\n",
+ inst->session_type, inst->state);
+ if (inst->session == 0) {
+ if (inst->state >= MSM_CVP_CORE_INIT_DONE &&
+ inst->state < MSM_CVP_OPEN_DONE) {
+ /* Session is not created, no ARP */
+ inst->state = MSM_CVP_CORE_UNINIT;
+ if (inst->state == MSM_CVP_CORE_UNINIT)
+ if (inst->session_type != MSM_CVP_BOOT) {
+ rc = msm_cvp_cleanup_instance(inst);
+ msm_cvp_session_deinit(inst);
+ rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_UNINIT);
+ "Failed to move inst %pK to uninit state\n", inst);
+ kref_put(&inst->kref, close_helper);
+EXPORT_SYMBOL(msm_cvp_close);
+int msm_cvp_suspend(void)
+ return msm_cvp_comm_suspend();
+EXPORT_SYMBOL(msm_cvp_suspend);
@@ -0,0 +1,40 @@
+#ifndef _MSM_CVP_CORE_H_
+#define _MSM_CVP_CORE_H_
+#include "msm_cvp_synx.h"
+#define DDR_TYPE_LPDDR4 0x6
+#define DDR_TYPE_LPDDR4X 0x7
+#define DDR_TYPE_LPDDR4Y 0x8
+#define DDR_TYPE_LPDDR5 0x9
+enum session_type {
+ MSM_CVP_USER = 1,
+ MSM_CVP_KERNEL,
+ MSM_CVP_BOOT,
+ MSM_CVP_DSP,
+ MSM_CVP_UNKNOWN,
+ MSM_CVP_MAX_DEVICES = MSM_CVP_UNKNOWN,
+struct msm_cvp_inst *msm_cvp_open(int session_type, struct task_struct *task);
+int msm_cvp_close(void *instance);
+int msm_cvp_suspend(void);
+ struct poll_table_struct *pt);
+ struct eva_kmd_arg *arg);
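A hypothetical in-kernel caller of the exported entry points declared here might look like the following; MSM_CVP_KERNEL comes from enum session_type above, while the surrounding function and its error handling are purely illustrative.

#include <linux/sched.h>

/* Illustrative only: open a kernel-type session for the current task and
 * close it again. Real callers keep the instance for the session's lifetime. */
static int demo_open_close_session(void)
{
	struct msm_cvp_inst *inst;

	inst = msm_cvp_open(MSM_CVP_KERNEL, current);
	if (!inst)
		return -ENOMEM;

	return msm_cvp_close(inst);
}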
+#define MAX_SSR_STRING_LEN 10
+int msm_cvp_debug = CVP_ERR | CVP_WARN | CVP_FW;
+EXPORT_SYMBOL(msm_cvp_debug);
+int msm_cvp_debug_out = CVP_OUT_PRINTK;
+EXPORT_SYMBOL(msm_cvp_debug_out);
+int msm_cvp_fw_debug = 0x18;
+int msm_cvp_fw_debug_mode = 1;
+int msm_cvp_fw_low_power_mode = 1;
+bool msm_cvp_fw_coverage = !true;
+bool msm_cvp_auto_pil = true;
+bool msm_cvp_cacheop_enabled = true;
+bool msm_cvp_thermal_mitigation_disabled = !true;
+bool msm_cvp_cacheop_disabled = !true;
+int msm_cvp_clock_voting = !1;
+bool msm_cvp_syscache_disable = !true;
+bool msm_cvp_dsp_disable = !true;
+bool msm_cvp_mmrm_enabled = true;
+bool msm_cvp_mmrm_enabled = !true;
+bool msm_cvp_dcvs_disable = !true;
+int msm_cvp_minidump_enable = !1;
+int cvp_kernel_fence_enabled = 2;
+int msm_cvp_hw_wd_recovery = 1;
+#define MAX_DBG_BUF_SIZE 4096
+struct cvp_core_inst_pair {
+static int core_info_open(struct inode *inode, struct file *file)
+ file->private_data = inode->i_private;
+ dprintk(CVP_INFO, "%s: Enter\n", __func__);
+static u32 write_str(char *buffer,
+ size_t size, const char *fmt, ...)
+ va_list args;
+ u32 len;
+ va_start(args, fmt);
+ len = vscnprintf(buffer, size, fmt, args);
+ va_end(args);
+ return len;
+static ssize_t core_info_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+ struct msm_cvp_core *core = file->private_data;
+ struct cvp_hal_fw_info fw_info = { {0} };
+ char *dbuf, *cur, *end;
+ int i = 0, rc = 0;
+ ssize_t len = 0;
+ dprintk(CVP_ERR, "Invalid params, core: %pK\n", core);
+ dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+ dprintk(CVP_ERR, "%s: Allocation failed!\n", __func__);
+ cur = dbuf;
+ end = cur + MAX_DBG_BUF_SIZE;
+ cur += write_str(cur, end - cur, "===============================\n");
+ cur += write_str(cur, end - cur, "CORE %d: %pK\n", 0, core);
+ cur += write_str(cur, end - cur, "Core state: %d\n", core->state);
+ rc = call_hfi_op(ops_tbl, get_fw_info, ops_tbl->hfi_device_data, &fw_info);
+ dprintk(CVP_WARN, "Failed to read FW info\n");
+ goto err_fw_info;
+ cur += write_str(cur, end - cur,
+ "FW version : %s\n", &fw_info.version);
+ "base addr: 0x%x\n", fw_info.base_addr);
+ "register_base: 0x%x\n", fw_info.register_base);
+ "register_size: %u\n", fw_info.register_size);
+ cur += write_str(cur, end - cur, "irq: %u\n", fw_info.irq);
+err_fw_info:
+ for (i = SYS_MSG_START; i < SYS_MSG_END; i++) {
+ cur += write_str(cur, end - cur, "completions[%d]: %s\n", i,
+ completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
+ "pending" : "done");
+ len = simple_read_from_buffer(buf, count, ppos,
+ dbuf, cur - dbuf);
+ kfree(dbuf);
+static const struct file_operations core_info_fops = {
+ .open = core_info_open,
+ .read = core_info_read,
+static int trigger_ssr_open(struct inode *inode, struct file *file)
+static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
+ unsigned long ssr_trigger_val = 0;
+ struct msm_cvp_core *core = filp->private_data;
+ size_t size = MAX_SSR_STRING_LEN;
+ char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
+ if (!count)
+ if (count < size)
+ size = count;
+ if (copy_from_user(kbuf, buf, size)) {
+ dprintk(CVP_WARN, "%s User memory fault\n", __func__);
+ rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+ dprintk(CVP_WARN, "returning error err %d\n", rc);
+ msm_cvp_trigger_ssr(core, ssr_trigger_val);
+ rc = count;
+static const struct file_operations ssr_fops = {
+ .open = trigger_ssr_open,
+ .write = trigger_ssr_write,
+static int cvp_power_get(void *data, u64 *val)
+ if (!ops_tbl)
+ if (!hfi_device)
+ *val = hfi_device->power_enabled;
+#define MIN_PC_INTERVAL 1000
+#define MAX_PC_INTERVAL 1000000
+static int cvp_power_set(void *data, u64 val)
+ if (val >= MAX_PC_INTERVAL) {
+ hfi_device->res->sw_power_collapsible = 0;
+ } else if (val > MIN_PC_INTERVAL) {
+ hfi_device->res->sw_power_collapsible = 1;
+ hfi_device->res->msm_cvp_pwr_collapse_delay =
+ (unsigned int)val;
+ if (val > 0) {
+ rc = call_hfi_op(ops_tbl, resume, ops_tbl->hfi_device_data);
+ dprintk(CVP_ERR, "debugfs fail to power on cvp\n");
+DEFINE_DEBUGFS_ATTRIBUTE(cvp_pwr_fops, cvp_power_get, cvp_power_set, "%llu\n");
+struct dentry *msm_cvp_debugfs_init_drv(void)
+ struct dentry *dir = NULL;
+ dir = debugfs_create_dir("msm_cvp", NULL);
+ if (IS_ERR_OR_NULL(dir)) {
+ dir = NULL;
+ goto failed_create_dir;
+ debugfs_create_x32("debug_level", 0644, dir, &msm_cvp_debug);
+ debugfs_create_x32("fw_level", 0644, dir, &msm_cvp_fw_debug);
+ debugfs_create_u32("fw_debug_mode", 0644, dir, &msm_cvp_fw_debug_mode);
+ debugfs_create_u32("fw_low_power_mode", 0644, dir,
+ &msm_cvp_fw_low_power_mode);
+ debugfs_create_u32("debug_output", 0644, dir, &msm_cvp_debug_out);
+ debugfs_create_u32("minidump_enable", 0644, dir,
+ &msm_cvp_minidump_enable);
+ debugfs_create_bool("fw_coverage", 0644, dir, &msm_cvp_fw_coverage);
+ debugfs_create_bool("auto_pil", 0644, dir, &msm_cvp_auto_pil);
+ debugfs_create_u32("kernel_fence", 0644, dir, &cvp_kernel_fence_enabled);
+ debugfs_create_bool("disable_thermal_mitigation", 0644, dir,
+ &msm_cvp_thermal_mitigation_disabled);
+ debugfs_create_bool("enable_cacheop", 0644, dir,
+ &msm_cvp_cacheop_enabled);
+ debugfs_create_bool("disable_cvp_syscache", 0644, dir,
+ &msm_cvp_syscache_disable);
+ debugfs_create_bool("disable_dcvs", 0644, dir,
+ &msm_cvp_dcvs_disable);
+ debugfs_create_file("cvp_power", 0644, dir, NULL, &cvp_pwr_fops);
+ return dir;
+failed_create_dir:
+ if (dir)
+ dprintk(CVP_WARN, "Failed to create debugfs\n");
+static int _clk_rate_set(void *data, u64 val)
+ unsigned int tbl_size, i;
+ if (val == 0) {
+ struct iris_hfi_device *hdev = ops_tbl->hfi_device_data;
+ msm_cvp_clock_voting = 0;
+ call_hfi_op(ops_tbl, scale_clocks, hdev, hdev->clk_freq);
+ for (i = 0; i < tbl_size; i++)
+ if (val <= tbl[i].clock_rate)
+ if (i == tbl_size)
+ msm_cvp_clock_voting = tbl[tbl_size-1].clock_rate;
+ msm_cvp_clock_voting = tbl[i].clock_rate;
+ dprintk(CVP_WARN, "Override cvp_clk_rate with %d\n",
+ msm_cvp_clock_voting);
+ call_hfi_op(ops_tbl, scale_clocks, ops_tbl->hfi_device_data,
+static int _clk_rate_get(void *data, u64 *val)
+ *val = msm_cvp_clock_voting;
+ *val = hdev->clk_freq;
+DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, _clk_rate_get, _clk_rate_set, "%llu\n");
+static int _dsp_dbg_set(void *data, u64 val)
+ gfa_cv.debug_mask = (uint32_t)val;
+ cvp_dsp_send_debug_mask();
+static int _dsp_dbg_get(void *data, u64 *val)
+ *val = gfa_cv.debug_mask;
+DEFINE_DEBUGFS_ATTRIBUTE(dsp_debug_fops, _dsp_dbg_get, _dsp_dbg_set, "%llu\n");
+static int _max_ssr_set(void *data, u64 val)
+ if (val < 1) {
+ "Invalid max_ssr_allowed value %llx\n", val);
+ core->resources.max_ssr_allowed = (unsigned int)val;
+static int _max_ssr_get(void *data, u64 *val)
+ *val = core->resources.max_ssr_allowed;
+DEFINE_DEBUGFS_ATTRIBUTE(max_ssr_fops, _max_ssr_get, _max_ssr_set, "%llu\n");
+static int _ssr_stall_set(void *data, u64 val)
+ core->resources.fatal_ssr = (val >= 1) ? true : false;
+static int _ssr_stall_get(void *data, u64 *val)
+ *val = core->resources.fatal_ssr ? 1 : 0;
+DEFINE_DEBUGFS_ATTRIBUTE(ssr_stall_fops, _ssr_stall_get, _ssr_stall_set, "%llu\n");
+struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
+ struct dentry *parent)
+ char debugfs_name[MAX_DEBUGFS_NAME];
+ snprintf(debugfs_name, MAX_DEBUGFS_NAME, "core%d", 0);
+ dir = debugfs_create_dir(debugfs_name, parent);
+ if (!debugfs_create_file("info", 0444, dir, core, &core_info_fops)) {
+ dprintk(CVP_ERR, "debugfs_create_file: fail\n");
+ if (!debugfs_create_file("trigger_ssr", 0200,
+ dir, core, &ssr_fops)) {
+ if (!debugfs_create_file("clock_rate", 0644, dir,
+ NULL, &clk_rate_fops)) {
+ dprintk(CVP_ERR, "debugfs_create_file: clock_rate fail\n");
+ if (!debugfs_create_file("dsp_debug_level", 0644, dir,
+ NULL, &dsp_debug_fops)) {
+ dprintk(CVP_ERR, "debugfs_create: dsp_debug_level fail\n");
+ if (!debugfs_create_file("max_ssr_allowed", 0644, dir,
+ NULL, &max_ssr_fops)) {
+ dprintk(CVP_ERR, "debugfs_create: max_ssr_allowed fail\n");
+ if (!debugfs_create_file("ssr_stall", 0644, dir,
+ NULL, &ssr_stall_fops)) {
+ dprintk(CVP_ERR, "debugfs_create: ssr_stall fail\n");
+ debugfs_create_u32("hw_wd_recovery", 0644, dir,
+ &msm_cvp_hw_wd_recovery);
+static int inst_info_open(struct inode *inode, struct file *file)
+ dprintk(CVP_INFO, "Open inode ptr: %pK\n", inode->i_private);
+static int publish_unreleased_reference(struct msm_cvp_inst *inst,
+ char **dbuf, char *end)
+ dprintk(CVP_SESS, "%s deprecated function\n", __func__);
+static ssize_t inst_info_read(struct file *file, char __user *buf,
+ struct cvp_core_inst_pair *idata = file->private_data;
+ struct msm_cvp_inst *inst, *temp = NULL;
+ if (!idata || !idata->core || !idata->inst) {
+ core = idata->core;
+ inst = idata->inst;
+ list_for_each_entry(temp, &core->instances, list) {
+ if (temp == inst)
+ inst = ((temp == inst) && kref_get_unless_zero(&inst->kref)) ?
+ inst : NULL;
+ dprintk(CVP_ERR, "%s: Instance has become obsolete", __func__);
+ len = -ENOMEM;
+ goto failed_alloc;
+ cur += write_str(cur, end - cur, "==============================\n");
+ cur += write_str(cur, end - cur, "INSTANCE: %pK (%s)\n", inst,
+ inst->session_type == MSM_CVP_USER ? "User" : "Kernel");
+ cur += write_str(cur, end - cur, "core: %pK\n", inst->core);
+ cur += write_str(cur, end - cur, "state: %d\n", inst->state);
+ cur += write_str(cur, end - cur, "secure: %d\n",
+ !!(inst->flags & CVP_SECURE));
+ for (i = SESSION_MSG_START; i < SESSION_MSG_END; i++) {
+ completion_done(&inst->completions[SESSION_MSG_INDEX(i)]) ?
+ publish_unreleased_reference(inst, &cur, end);
+failed_alloc:
+static int inst_info_release(struct inode *inode, struct file *file)
+ dprintk(CVP_INFO, "Release inode ptr: %pK\n", inode->i_private);
+ file->private_data = NULL;
+static const struct file_operations inst_info_fops = {
+ .open = inst_info_open,
+ .read = inst_info_read,
+ .release = inst_info_release,
+struct dentry *msm_cvp_debugfs_init_inst(struct msm_cvp_inst *inst,
+ struct dentry *dir = NULL, *info = NULL;
+ struct cvp_core_inst_pair *idata = NULL;
+ dprintk(CVP_ERR, "Invalid params, inst: %pK\n", inst);
+ snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%pK", inst);
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ idata->core = inst->core;
+ idata->inst = inst;
+ info = debugfs_create_file("info", 0444, dir,
+ idata, &inst_info_fops);
+ if (!info) {
+ dprintk(CVP_ERR, "debugfs_create_file: info fail\n");
+ goto failed_create_file;
+ dir->d_inode->i_private = info->d_inode->i_private;
+ inst->debug.pdata[FRAME_PROCESSING].sampling = true;
+failed_create_file:
+ debugfs_remove_recursive(dir);
+ kfree(idata);
+void msm_cvp_debugfs_deinit_inst(struct msm_cvp_inst *inst)
+ struct dentry *dentry = NULL;
+ if (!inst || !inst->debugfs_root)
+ dentry = inst->debugfs_root;
+ if (dentry->d_inode) {
+ dprintk(CVP_INFO, "Destroy %pK\n", dentry->d_inode->i_private);
+ kfree(dentry->d_inode->i_private);
+ dentry->d_inode->i_private = NULL;
+ debugfs_remove_recursive(dentry);
+ inst->debugfs_root = NULL;
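The cvp_power, clock_rate, max_ssr_allowed and ssr_stall knobs above all follow the same shape: a get/set accessor pair wrapped by DEFINE_DEBUGFS_ATTRIBUTE and exposed with debugfs_create_file(). A minimal sketch of that pattern, with an invented "demo_knob" name and backing variable:

#include <linux/debugfs.h>

static u64 demo_knob_val;

static int demo_knob_get(void *data, u64 *val)
{
	*val = demo_knob_val;
	return 0;
}

static int demo_knob_set(void *data, u64 val)
{
	demo_knob_val = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_knob_fops, demo_knob_get, demo_knob_set, "%llu\n");

/* Register the knob under an existing debugfs directory, e.g. the driver's
 * "msm_cvp" root created by msm_cvp_debugfs_init_drv(). */
static void demo_knob_register(struct dentry *parent)
{
	debugfs_create_file("demo_knob", 0644, parent, NULL, &demo_knob_fops);
}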
@@ -0,0 +1,205 @@
+#ifndef __MSM_CVP_DEBUG__
+#define __MSM_CVP_DEBUG__
+#include "msm_cvp_events.h"
+#ifndef CVP_DBG_LABEL
+#define CVP_DBG_LABEL "msm_cvp"
+#define CVP_DBG_TAG CVP_DBG_LABEL ": %4s: "
+/* To enable messages, OR these values together and
+ * echo the result to the debugfs file.
+ * To enable all messages, set debug_level = 0x101F
+enum cvp_msg_prio {
+ CVP_ERR = 0x000001,
+ CVP_WARN = 0x000002,
+ CVP_INFO = 0x000004,
+ CVP_CMD = 0x000008,
+ CVP_PROF = 0x000010,
+ CVP_PKT = 0x000020,
+ CVP_MEM = 0x000040,
+ CVP_SYNX = 0x000080,
+ CVP_CORE = 0x000100,
+ CVP_REG = 0x000200,
+ CVP_PWR = 0x000400,
+ CVP_DSP = 0x000800,
+ CVP_FW = 0x001000,
+ CVP_SESS = 0x002000,
+ CVP_HFI = 0x004000,
+ CVP_VM = 0x008000,
+ CVP_DBG = CVP_MEM | CVP_SYNX | CVP_CORE | CVP_REG | CVP_CMD |
+ CVP_PWR | CVP_DSP | CVP_SESS | CVP_HFI | CVP_PKT | CVP_VM,
+enum cvp_msg_out {
+ CVP_OUT_PRINTK = 0,
+enum msm_cvp_debugfs_event {
+ MSM_CVP_DEBUGFS_EVENT_ETB,
+ MSM_CVP_DEBUGFS_EVENT_EBD,
+ MSM_CVP_DEBUGFS_EVENT_FTB,
+ MSM_CVP_DEBUGFS_EVENT_FBD,
+extern int msm_cvp_debug;
+extern int msm_cvp_debug_out;
+extern int msm_cvp_fw_debug;
+extern int msm_cvp_fw_debug_mode;
+extern int msm_cvp_fw_low_power_mode;
+extern bool msm_cvp_fw_coverage;
+extern bool msm_cvp_auto_pil;
+extern bool msm_cvp_thermal_mitigation_disabled;
+extern bool msm_cvp_cacheop_disabled;
+extern int msm_cvp_clock_voting;
+extern bool msm_cvp_syscache_disable;
+extern bool msm_cvp_dsp_disable;
+extern bool msm_cvp_mmrm_enabled;
+extern bool msm_cvp_dcvs_disable;
+extern int msm_cvp_minidump_enable;
+extern int cvp_kernel_fence_enabled;
+extern int msm_cvp_hw_wd_recovery;
+#define dprintk(__level, __fmt, arg...) \
+ if (msm_cvp_debug & __level) { \
+ if (msm_cvp_debug_out == CVP_OUT_PRINTK) { \
+ pr_info(CVP_DBG_TAG __fmt, \
+ get_debug_level_str(__level), \
+ ## arg); \
+/* dprintk_rl is designed for printing frequent recurring errors */
+#define dprintk_rl(__level, __fmt, arg...) \
+ pr_info_ratelimited(CVP_DBG_TAG __fmt, \
+#define MSM_CVP_ERROR(value) \
+ do { if (value) \
+ dprintk(CVP_ERR, "BugOn"); \
+ WARN_ON(value); \
+struct dentry *msm_cvp_debugfs_init_drv(void);
+ struct dentry *parent);
+void msm_cvp_debugfs_deinit_inst(struct msm_cvp_inst *inst);
+static inline char *get_debug_level_str(int level)
+ case CVP_ERR:
+ return "err";
+ case CVP_WARN:
+ return "warn";
+ case CVP_INFO:
+ return "info";
+ case CVP_CMD:
+ return "cmd";
+ case CVP_DBG:
+ return "dbg";
+ case CVP_PROF:
+ return "prof";
+ case CVP_PKT:
+ return "pkt";
+ case CVP_MEM:
+ return "mem";
+ case CVP_SYNX:
+ return "synx";
+ case CVP_CORE:
+ return "core";
+ case CVP_REG:
+ return "reg";
+ case CVP_PWR:
+ return "pwr";
+ case CVP_DSP:
+ return "dsp";
+ case CVP_FW:
+ return "fw";
+ case CVP_SESS:
+ return "sess";
+ case CVP_HFI:
+ return "hfi";
+ case CVP_VM:
+ return "vm";
+ return "???";
+static inline void show_stats(struct msm_cvp_inst *i)
+ int x;
+ for (x = 0; x < MAX_PROFILING_POINTS; x++) {
+ if (i->debug.pdata[x].name[0] &&
+ (msm_cvp_debug & CVP_PROF)) {
+ if (i->debug.samples) {
+ dprintk(CVP_PROF, "%s averaged %d ms/sample\n",
+ i->debug.pdata[x].name,
+ i->debug.pdata[x].cumulative /
+ i->debug.samples);
+ dprintk(CVP_PROF, "%s Samples: %d\n",
+static inline void msm_cvp_res_handle_fatal_hw_error(
+ struct msm_cvp_platform_resources *resources,
+ bool enable_fatal)
+ enable_fatal &= resources->debug_timeout;
+ MSM_CVP_ERROR(enable_fatal);
+static inline void msm_cvp_handle_hw_error(struct msm_cvp_core *core)
+ bool enable_fatal = true;
+ if (core->trigger_ssr) {
+ enable_fatal = false;
+ /* CVP driver can decide FATAL handling of HW errors
+ * based on multiple factors. This condition check will
+ * be enhanced later.
+ msm_cvp_res_handle_fatal_hw_error(&core->resources, enable_fatal);
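The priority bits above are OR'd into msm_cvp_debug (for example by echoing a mask such as 0x101F into the debug_level debugfs file). The tiny sketch below shows the gating that the dprintk() macro performs; the local mask is a stand-in for msm_cvp_debug and the message text is illustrative.

#include <linux/printk.h>

/* Local stand-in for msm_cvp_debug; CVP_ERR/CVP_WARN/CVP_PWR are the
 * enum cvp_msg_prio bits defined above. */
static int demo_debug_mask = CVP_ERR | CVP_WARN | CVP_PWR;

static void demo_log_clock_scale(unsigned long rate)
{
	/* Printed only when the CVP_PWR bit is set in the mask */
	if (demo_debug_mask & CVP_PWR)
		pr_info("msm_cvp:  pwr: scaling clock to %lu\n", rate);
}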
@@ -0,0 +1,2234 @@
+#include <linux/rpmsg.h>
+#include <linux/of_fdt.h>
+static atomic_t nr_maps;
+struct cvp_dsp_apps gfa_cv;
+static int cvp_reinit_dsp(void);
+static void cvp_remove_dsp_sessions(void);
+static int __fastrpc_driver_register(struct fastrpc_driver *driver)
+#ifdef CVP_FASTRPC_ENABLED
+ return fastrpc_driver_register(driver);
+static void __fastrpc_driver_unregister(struct fastrpc_driver *driver)
+ return fastrpc_driver_unregister(driver);
+static int __fastrpc_driver_invoke(struct fastrpc_device *dev,
+ enum fastrpc_driver_invoke_nums invoke_num,
+ unsigned long invoke_param)
+ return fastrpc_driver_invoke(dev, invoke_num, invoke_param);
+#endif /* End of CVP_FASTRPC_ENABLED */
+static int cvp_dsp_send_cmd(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
+ struct cvp_dsp_apps *me = &gfa_cv;
+ dprintk(CVP_DSP, "%s: cmd = %d\n", __func__, cmd->type);
+ if (IS_ERR_OR_NULL(me->chan)) {
+ dprintk(CVP_ERR, "%s: DSP GLink is not ready\n", __func__);
+ rc = rpmsg_send(me->chan->ept, cmd, len);
+ dprintk(CVP_ERR, "%s: DSP rpmsg_send failed rc=%d\n",
+static int cvp_dsp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd,
+ uint32_t len, struct cvp_dsp_rsp_msg *rsp)
+ me->pending_dsp2cpu_rsp.type = cmd->type;
+ rc = cvp_dsp_send_cmd(cmd, len);
+ dprintk(CVP_ERR, "%s: cvp_dsp_send_cmd failed rc=%d\n",
+ if (!wait_for_completion_timeout(&me->completions[cmd->type],
+ msecs_to_jiffies(CVP_DSP_RESPONSE_TIMEOUT))) {
+ dprintk(CVP_ERR, "%s cmd %d timeout\n", __func__, cmd->type);
+ rsp->ret = me->pending_dsp2cpu_rsp.ret;
+ rsp->dsp_state = me->pending_dsp2cpu_rsp.dsp_state;
+ me->pending_dsp2cpu_rsp.type = CVP_INVALID_RPMSG_TYPE;
+static int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
+ uint32_t size_in_bytes,
+ struct cvp_dsp_rsp_msg *rsp)
+ struct cvp_dsp_cmd_msg cmd;
+ cmd.type = CPU2DSP_SEND_HFI_QUEUE;
+ cmd.msg_ptr = (uint64_t)phys_addr;
+ cmd.msg_ptr_len = size_in_bytes;
+ cmd.ddr_type = cvp_of_fdt_get_ddrtype();
+ if (cmd.ddr_type < 0) {
+ "%s: Incorrect DDR type value %d, use default %d\n",
+ __func__, cmd.ddr_type, DDR_TYPE_LPDDR5);
+ /*return -EINVAL;*/
+ cmd.ddr_type = DDR_TYPE_LPDDR5;
+ "%s: address of buffer, PA=0x%pK size_buff=%d ddr_type=%d\n",
+ __func__, phys_addr, size_in_bytes, cmd.ddr_type);
+ rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), rsp);
+ "%s: cvp_dsp_send_cmd failed rc = %d\n",
+static int cvp_hyp_assign_to_dsp(uint64_t addr, uint32_t size)
+ uint64_t hlosVMid = BIT(VMID_HLOS);
+ struct qcom_scm_vmperm dspVM[DSP_VM_NUM] = {
+ {VMID_HLOS, PERM_READ | PERM_WRITE | PERM_EXEC},
+ {VMID_CDSP_Q6, PERM_READ | PERM_WRITE | PERM_EXEC}
+ if (!me->hyp_assigned) {
+ rc = qcom_scm_assign_mem(addr, size, &hlosVMid, dspVM, DSP_VM_NUM);
+ dprintk(CVP_ERR, "%s failed. rc=%d\n", __func__, rc);
+ me->addr = addr;
+ me->size = size;
+ me->hyp_assigned = true;
+static int cvp_hyp_assign_from_dsp(void)
+ uint64_t dspVMids = BIT(VMID_HLOS) | BIT(VMID_CDSP_Q6);
+ struct qcom_scm_vmperm hlosVM[HLOS_VM_NUM] = {
+ if (me->hyp_assigned) {
+ rc = qcom_scm_assign_mem(me->addr, me->size, &dspVMids, hlosVM, HLOS_VM_NUM);
+ me->addr = 0;
+ me->size = 0;
+ me->hyp_assigned = false;
+static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
+ const char *edge_name = NULL;
+ int ret = 0;
+ ret = of_property_read_string(rpdev->dev.parent->of_node,
+ "label", &edge_name);
+ dprintk(CVP_ERR, "glink edge 'label' not found in node\n");
+ if (strcmp(edge_name, "cdsp")) {
+ "%s: Failed to probe rpmsg device.Node name:%s\n",
+ __func__, edge_name);
+ mutex_lock(&me->tx_lock);
+ me->chan = rpdev;
+ me->state = DSP_PROBED;
+ mutex_unlock(&me->tx_lock);
+ complete(&me->completions[CPU2DSP_MAX_CMD]);
+static int eva_fastrpc_dev_unmap_dma(
+ struct fastrpc_device *frpc_device,
+static int delete_dsp_session(struct msm_cvp_inst *inst,
+ struct cvp_dsp_fastrpc_driver_entry *frpc_node)
+ struct msm_cvp_list *buf_list = NULL;
+ struct list_head *ptr_dsp_buf = NULL, *next_dsp_buf = NULL;
+ struct cvp_internal_buf *buf = NULL;
+ struct task_struct *task = NULL;
+ if (!inst)
+ buf_list = &inst->cvpdspbufs;
+ ptr_dsp_buf = &buf_list->list;
+ list_for_each_safe(ptr_dsp_buf, next_dsp_buf, &buf_list->list) {
+ if (!ptr_dsp_buf)
+ buf = list_entry(ptr_dsp_buf, struct cvp_internal_buf, list);
+ dprintk(CVP_DSP, "fd in list 0x%x\n", buf->fd);
+ dprintk(CVP_DSP, "Empyt smem\n");
+ dprintk(CVP_DSP, "%s find device addr 0x%x\n",
+ __func__, buf->smem->device_addr);
+ rc = eva_fastrpc_dev_unmap_dma(
+ frpc_node->cvp_fastrpc_device,
+ buf);
+ "%s Failed to unmap buffer 0x%x\n",
+ rc = cvp_release_dsp_buffers(inst, buf);
+ "%s Failed to free buffer 0x%x\n",
+ task = inst->task;
+ dprintk(CVP_WARN, "%s Unexpected pm_qos off vote %d\n",
+ dprintk(CVP_ERR, "Warning: Failed to close cvp instance\n");
+ if (task)
+ put_task_struct(task);
+ dprintk(CVP_DSP, "%s DSP2CPU_DETELE_SESSION Done\n", __func__);
+static int eva_fastrpc_driver_get_name(
+ for (i = 0; i < MAX_FASTRPC_DRIVER_NUM; i++) {
+ if (me->cvp_fastrpc_name[i].status == DRIVER_NAME_AVAILABLE) {
+ frpc_node->driver_name_idx = i;
+ frpc_node->cvp_fastrpc_driver.driver.name =
+ me->cvp_fastrpc_name[i].name;
+ me->cvp_fastrpc_name[i].status = DRIVER_NAME_USED;
+ dprintk(CVP_DSP, "%s -> handle 0x%x get name %s\n",
+ __func__, frpc_node->cvp_fastrpc_driver.handle,
+ frpc_node->cvp_fastrpc_driver.driver.name);
+ return -1;
+static void eva_fastrpc_driver_release_name(
+ me->cvp_fastrpc_name[frpc_node->driver_name_idx].status =
+ DRIVER_NAME_AVAILABLE;
+/* This function may block for up to 50 ms */
+static bool dequeue_frpc_node(struct cvp_dsp_fastrpc_driver_entry *node)
+ struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL;
+ struct list_head *ptr = NULL, *next = NULL;
+ u32 refcount, max_count = 10;
+ bool rc = false;
+ if (!node)
+search_again:
+ ptr = &me->fastrpc_driver_list.list;
+ mutex_lock(&me->fastrpc_driver_list.lock);
+ list_for_each_safe(ptr, next, &me->fastrpc_driver_list.list) {
+ frpc_node = list_entry(ptr,
+ struct cvp_dsp_fastrpc_driver_entry, list);
+ if (frpc_node == node) {
+ refcount = atomic_read(&frpc_node->refcount);
+ if (refcount > 0) {
+ mutex_unlock(&me->fastrpc_driver_list.lock);
+ usleep_range(5000, 10000);
+ if (max_count-- == 0) {
+ dprintk(CVP_ERR, "%s timeout %d\n",
+ __func__, refcount);
+ goto search_again;
+ list_del(&frpc_node->list);
+ rc = true;
+static struct cvp_dsp_fastrpc_driver_entry *pop_frpc_node(void)
+ frpc_node = NULL;
+ if (frpc_node) {
+ dprintk(CVP_ERR, "%s timeout\n",
+ return frpc_node;
+static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
+ u32 max_num_retries = 100;
+ dprintk(CVP_WARN, "%s: CDSP SSR triggered\n", __func__);
+ mutex_lock(&me->rx_lock);
+ while (max_num_retries > 0) {
+ if (me->pending_dsp2cpu_cmd.type !=
+ CVP_INVALID_RPMSG_TYPE) {
+ mutex_unlock(&me->rx_lock);
+ usleep_range(1000, 5000);
+ max_num_retries--;
+ if (!max_num_retries)
+ dprintk(CVP_ERR, "stuck processing pending DSP cmds\n");
+ cvp_hyp_assign_from_dsp();
+ me->chan = NULL;
+ me->state = DSP_UNINIT;
+ /* Wait for HW to finish processing the current frame */
+ usleep_range(20000, 50000);
+ cvp_remove_dsp_sessions();
+ dprintk(CVP_WARN, "%s: CDSP SSR handled nr_maps %d\n", __func__,
+ atomic_read(&nr_maps));
+static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
+ void *data, int len, void *priv, u32 addr)
+ struct cvp_dsp_rsp_msg *rsp = (struct cvp_dsp_rsp_msg *)data;
+ dprintk(CVP_DSP, "%s: type = 0x%x ret = 0x%x len = 0x%x\n",
+ __func__, rsp->type, rsp->ret, len);
+ if (rsp->type < CPU2DSP_MAX_CMD && len == sizeof(*rsp)) {
+ if (me->pending_dsp2cpu_rsp.type == rsp->type) {
+ memcpy(&me->pending_dsp2cpu_rsp, rsp,
+ sizeof(struct cvp_dsp_rsp_msg));
+ complete(&me->completions[rsp->type]);
+ dprintk(CVP_ERR, "%s: CPU2DSP resp %d, pending %d\n",
+ __func__, rsp->type,
+ me->pending_dsp2cpu_rsp.type);
+ } else if (rsp->type < CVP_DSP_MAX_CMD &&
+ len == sizeof(struct cvp_dsp2cpu_cmd)) {
+ if (me->pending_dsp2cpu_cmd.type != CVP_INVALID_RPMSG_TYPE) {
+ "%s: DSP2CPU cmd:%d pending %d %d expect %d\n",
+ me->pending_dsp2cpu_cmd.type, len,
+ sizeof(struct cvp_dsp2cpu_cmd));
+ memcpy(&me->pending_dsp2cpu_cmd, rsp,
+ dprintk(CVP_ERR, "%s: Invalid type: %d\n", __func__, rsp->type);
+ dprintk(CVP_ERR, "concurrent dsp cmd type = %d, rsp type = %d\n",
+ me->pending_dsp2cpu_cmd.type,
+static bool dsp_session_exist(void)
+int cvp_dsp_suspend(bool force)
+ struct cvp_dsp_rsp_msg rsp;
+ bool retried = false;
+ /* If not forced to suspend, check if DSP requested PC earlier */
+ if (force == false)
+ if (dsp_session_exist())
+ if (me->state != DSP_SUSPEND)
+ cmd.type = CPU2DSP_SUSPEND;
+ if (me->state != DSP_READY)
+ /* Use cvp_dsp_send_cmd_sync after dsp driver is ready */
+ rc = cvp_dsp_send_cmd_sync(&cmd,
+ sizeof(struct cvp_dsp_cmd_msg),
+ &rsp);
+ if (rsp.ret == CPU2DSP_EUNAVAILABLE)
+ goto fatal_exit;
+ if (rsp.ret == CPU2DSP_EFATAL) {
+ dprintk(CVP_ERR, "%s: suspend dsp got EFATAL error\n",
+ if (!retried) {
+ retried = true;
+ rc = cvp_reinit_dsp();
+ me->state = DSP_SUSPEND;
+ dprintk(CVP_DSP, "DSP suspended, nr_map: %d\n", atomic_read(&nr_maps));
+fatal_exit:
+ me->state = DSP_INVALID;
+int cvp_dsp_resume(void)
+ cmd.type = CPU2DSP_RESUME;
+	 * Can deadlock against DSP2CPU_CREATE_SESSION in dsp_thread.
+	 * We should probably get rid of this entirely, as discussed before.
+ dprintk(CVP_WARN, "%s DSP not in SUSPEND state\n", __func__);
+static void cvp_remove_dsp_sessions(void)
+ struct list_head *s = NULL, *next_s = NULL;
+ while ((frpc_node = pop_frpc_node())) {
+ s = &frpc_node->dsp_sessions.list;
+ if (!s || !(s->next))
+ list_for_each_safe(s, next_s,
+ &frpc_node->dsp_sessions.list) {
+ if (!s || !next_s)
+ inst = list_entry(s, struct msm_cvp_inst,
+ dsp_list);
+ if (inst) {
+ mutex_lock(&frpc_node->dsp_sessions.lock);
+ list_del(&inst->dsp_list);
+ frpc_node->session_cnt--;
+ mutex_unlock(&frpc_node->dsp_sessions.lock);
+ delete_dsp_session(inst, frpc_node);
+ dprintk(CVP_DSP, "%s DEINIT_MSM_CVP_LIST 0x%x\n",
+ __func__, frpc_node->dsp_sessions);
+ DEINIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
+ dprintk(CVP_DSP, "%s list_del fastrpc node 0x%x\n",
+ __func__, frpc_node);
+ __fastrpc_driver_unregister(
+ &frpc_node->cvp_fastrpc_driver);
+ "%s Unregistered fastrpc handle 0x%x\n",
+ __func__, frpc_node->handle);
+ mutex_lock(&me->driver_name_lock);
+ eva_fastrpc_driver_release_name(frpc_node);
+ mutex_unlock(&me->driver_name_lock);
+ kfree(frpc_node);
+ dprintk(CVP_WARN, "%s: EVA SSR handled for CDSP\n", __func__);
+int cvp_dsp_shutdown(void)
+ cmd.type = CPU2DSP_SHUTDOWN;
+ if (me->state == DSP_INVALID)
+ me->state = DSP_INACTIVE;
+ rc = cvp_dsp_send_cmd_sync(&cmd, sizeof(struct cvp_dsp_cmd_msg), &rsp);
+ "%s: cvp_dsp_send_cmd failed with rc = %d\n",
+ rc = cvp_hyp_assign_from_dsp();
+static const struct rpmsg_device_id cvp_dsp_rpmsg_match[] = {
+ { CVP_APPS_DSP_GLINK_GUID },
+ { },
+static struct rpmsg_driver cvp_dsp_rpmsg_client = {
+ .id_table = cvp_dsp_rpmsg_match,
+ .probe = cvp_dsp_rpmsg_probe,
+ .remove = cvp_dsp_rpmsg_remove,
+ .callback = cvp_dsp_rpmsg_callback,
+ .drv = {
+ .name = "qcom,msm_cvp_dsp_rpmsg",
+static void cvp_dsp_set_queue_hdr_defaults(struct cvp_hfi_queue_header *q_hdr)
+void cvp_dsp_init_hfi_queue_hdr(struct iris_hfi_device *device)
+ iface_q = &device->dsp_iface_queues[i];
+ device->dsp_iface_q_table.align_virtual_addr, i);
+ cvp_dsp_set_queue_hdr_defaults(iface_q->q_hdr);
+ device->dsp_iface_q_table.align_virtual_addr;
+ q_tbl_hdr->device_addr = (void *)device;
+ iface_q = &device->dsp_iface_queues[CVP_IFACEQ_CMDQ_IDX];
+ iface_q = &device->dsp_iface_queues[CVP_IFACEQ_MSGQ_IDX];
+ iface_q = &device->dsp_iface_queues[CVP_IFACEQ_DBGQ_IDX];
+static int __reinit_dsp(void)
+ uint64_t addr;
+ uint32_t size;
+ if (core && core->dev_ops)
+ /* Force shutdown DSP */
+	 * Workaround to force-delete DSP session resources.
+	 * To be removed once the DSP optimization is ready.
+ dprintk(CVP_WARN, "Reinit EVA DSP interface: nr_map %d\n",
+ /* Resend HFI queue */
+ dprintk(CVP_ERR, "%s: DSP HFI queue released\n", __func__);
+ addr = (uint64_t)device->dsp_iface_q_table.mem_data.dma_handle;
+ size = device->dsp_iface_q_table.mem_data.size;
+ if (!addr || !size) {
+ dprintk(CVP_DSP, "%s: HFI queue is not ready\n", __func__);
+ rc = cvp_hyp_assign_to_dsp(addr, size);
+ dprintk(CVP_ERR, "%s: cvp_hyp_assign_to_dsp. rc=%d\n",
+ rc = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)addr, size, &rsp);
+ dprintk(CVP_WARN, "%s: Send HFI Queue failed rc = %d\n",
+ if (rsp.ret) {
+ dprintk(CVP_ERR, "%s: DSP error %d %d\n", __func__,
+ rsp.ret, rsp.dsp_state);
+static int cvp_reinit_dsp(void)
+ rc = __reinit_dsp();
+static void cvp_put_fastrpc_node(struct cvp_dsp_fastrpc_driver_entry *node)
+ if (node && (atomic_read(&node->refcount) > 0))
+ atomic_dec(&node->refcount);
+static struct cvp_dsp_fastrpc_driver_entry *cvp_get_fastrpc_node_with_handle(
+ uint32_t handle)
+ struct cvp_dsp_fastrpc_driver_entry *frpc_node = NULL, *tmp_node = NULL;
+ tmp_node = list_entry(ptr,
+ if (handle == tmp_node->handle) {
+ frpc_node = tmp_node;
+ atomic_inc(&frpc_node->refcount);
+ dprintk(CVP_DSP, "Find tmp_node with handle 0x%x\n",
+ handle);
+ dprintk(CVP_DSP, "%s found fastrpc probe handle %pK pid 0x%x\n",
+ __func__, frpc_node, handle);
+static void eva_fastrpc_driver_unregister(uint32_t handle, bool force_exit);
+static int cvp_fastrpc_probe(struct fastrpc_device *rpc_dev)
+ dprintk(CVP_DSP, "%s fastrpc probe handle 0x%x\n",
+ __func__, rpc_dev->handle);
+ frpc_node = cvp_get_fastrpc_node_with_handle(rpc_dev->handle);
+ frpc_node->cvp_fastrpc_device = rpc_dev;
+ complete(&frpc_node->fastrpc_probe_completion);
+ cvp_put_fastrpc_node(frpc_node);
+static int cvp_fastrpc_callback(struct fastrpc_device *rpc_dev,
+ enum fastrpc_driver_status fastrpc_proc_num)
+ dprintk(CVP_DSP, "%s handle 0x%x, proc %d\n", __func__,
+ rpc_dev->handle, fastrpc_proc_num);
+	/* The fastrpc driver is going down because the process is gone;
+	 * any handling can happen here, such as
+	 * eva_fastrpc_driver_unregister(rpc_dev->handle, true);
+ eva_fastrpc_driver_unregister(rpc_dev->handle, true);
+static struct fastrpc_driver cvp_fastrpc_client = {
+ .probe = cvp_fastrpc_probe,
+ .callback = cvp_fastrpc_callback,
+static int eva_fastrpc_dev_map_dma(struct fastrpc_device *frpc_device,
+ uint32_t dsp_remote_map,
+ uint64_t *v_dsp_addr)
+ struct fastrpc_dev_map_dma frpc_map_buf = {0};
+ if (dsp_remote_map == 1) {
+ frpc_map_buf.buf = buf->smem->dma_buf;
+ frpc_map_buf.size = buf->smem->size;
+ frpc_map_buf.attrs = 0;
+ "%s frpc_map_buf size %d, dma_buf %pK, map %pK, 0x%x\n",
+ __func__, frpc_map_buf.size, frpc_map_buf.buf,
+ &frpc_map_buf, (unsigned long)&frpc_map_buf);
+ rc = __fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_MAP_DMA,
+ (unsigned long)(&frpc_map_buf));
+ "%s Failed to map buffer 0x%x\n", __func__, rc);
+ buf->fd = (s32)frpc_map_buf.v_dsp_addr;
+ *v_dsp_addr = frpc_map_buf.v_dsp_addr;
+ atomic_inc(&nr_maps);
+ dprintk(CVP_DSP, "%s Buffer not mapped to dsp\n", __func__);
+static int eva_fastrpc_dev_unmap_dma(struct fastrpc_device *frpc_device,
+ struct fastrpc_dev_unmap_dma frpc_unmap_buf = {0};
+ /* Only if buffer is mapped to dsp */
+ if (buf->fd != 0) {
+ frpc_unmap_buf.buf = buf->smem->dma_buf;
+ rc = __fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_UNMAP_DMA,
+ (unsigned long)(&frpc_unmap_buf));
+ dprintk_rl(CVP_ERR, "%s Failed to unmap buffer %d\n",
+ if (atomic_read(&nr_maps) > 0)
+ atomic_dec(&nr_maps);
+ dprintk(CVP_DSP, "%s buffer not mapped to dsp\n", __func__);
+static int eva_fastrpc_dev_get_pid(struct fastrpc_device *frpc_device, int *pid)
+ struct fastrpc_dev_get_hlos_pid get_pid = {0};
+ rc = __fastrpc_driver_invoke(frpc_device, FASTRPC_DEV_GET_HLOS_PID,
+ (unsigned long)(&get_pid));
+ dprintk(CVP_ERR, "%s Failed to get PID %x\n",
+ *pid = get_pid.hlos_pid;
+static void eva_fastrpc_driver_add_sess(
+ struct cvp_dsp_fastrpc_driver_entry *frpc,
+ mutex_lock(&frpc->dsp_sessions.lock);
+ if (inst)
+ list_add_tail(&inst->dsp_list, &frpc->dsp_sessions.list);
+ dprintk(CVP_ERR, "%s incorrect input %pK\n", __func__, inst);
+ frpc->session_cnt++;
+ mutex_unlock(&frpc->dsp_sessions.lock);
+ dprintk(CVP_DSP, "add dsp sess %pK fastrpc_driver %pK\n", inst, frpc);
+int cvp_dsp_fastrpc_unmap(uint32_t handle, struct cvp_internal_buf *buf)
+ struct fastrpc_device *frpc_device = NULL;
+ frpc_node = cvp_get_fastrpc_node_with_handle(handle);
+ if (!frpc_node) {
+ dprintk(CVP_ERR, "%s no frpc node for dsp handle %d\n",
+ __func__, handle);
+ frpc_device = frpc_node->cvp_fastrpc_device;
+ rc = eva_fastrpc_dev_unmap_dma(frpc_device, buf);
+ dprintk(CVP_ERR, "%s Fail to unmap buffer 0x%x\n",
+int cvp_dsp_del_sess(uint32_t handle, struct msm_cvp_inst *inst)
+ list_for_each_safe(ptr, next, &frpc_node->dsp_sessions.list) {
+ sess = list_entry(ptr, struct msm_cvp_inst, dsp_list);
+ if (sess == inst) {
+ dprintk(CVP_DSP, "%s Find sess %pK to be deleted\n",
+static int eva_fastrpc_driver_register(uint32_t handle)
+ bool skip_deregister = true;
+ dprintk(CVP_DSP, "%s -> cvp_get_fastrpc_node_with_handle hdl 0x%x\n",
+ if (frpc_node == NULL) {
+ dprintk(CVP_DSP, "%s new fastrpc node hdl 0x%x\n",
+ frpc_node = kzalloc(sizeof(*frpc_node), GFP_KERNEL);
+ dprintk(CVP_DSP, "%s allocate frpc node fail\n",
+ memset(frpc_node, 0, sizeof(*frpc_node));
+ /* Setup fastrpc_node */
+ frpc_node->handle = handle;
+ frpc_node->cvp_fastrpc_driver = cvp_fastrpc_client;
+ frpc_node->cvp_fastrpc_driver.handle = handle;
+ rc = eva_fastrpc_driver_get_name(frpc_node);
+ dprintk(CVP_ERR, "%s fastrpc get name fail err %d\n",
+ goto fail_fastrpc_driver_get_name;
+ /* Init completion */
+ init_completion(&frpc_node->fastrpc_probe_completion);
+ list_add_tail(&frpc_node->list, &me->fastrpc_driver_list.list);
+ INIT_MSM_CVP_LIST(&frpc_node->dsp_sessions);
+ dprintk(CVP_DSP, "Add frpc node 0x%x to list\n", frpc_node);
+ /* register fastrpc device to this session */
+ rc = __fastrpc_driver_register(&frpc_node->cvp_fastrpc_driver);
+ dprintk(CVP_ERR, "%s fastrpc driver reg fail err %d\n",
+ skip_deregister = true;
+ goto fail_fastrpc_driver_register;
+	/* Wait for the probe completion signal; reuse the DSP response timeout setup for now */
+ if (!wait_for_completion_timeout(
+ &frpc_node->fastrpc_probe_completion,
+ dprintk(CVP_ERR, "%s fastrpc driver_register timeout %#x\n",
+ skip_deregister = false;
+ dprintk(CVP_DSP, "%s fastrpc probe frpc_node %pK hdl 0x%x\n",
+fail_fastrpc_driver_register:
+ dequeue_frpc_node(frpc_node);
+ if (!skip_deregister)
+ __fastrpc_driver_unregister(&frpc_node->cvp_fastrpc_driver);
+fail_fastrpc_driver_get_name:
+static void eva_fastrpc_driver_unregister(uint32_t handle, bool force_exit)
+ struct cvp_dsp2cpu_cmd *dsp2cpu_cmd = &me->pending_dsp2cpu_cmd;
+ dprintk(CVP_DSP, "%s Unregister fastrpc driver hdl %#x hdl %#x, f %d\n",
+ __func__, handle, dsp2cpu_cmd->pid, (uint32_t)force_exit);
+ if (handle != dsp2cpu_cmd->pid)
+ dprintk(CVP_ERR, "Unregister pid != hndl %#x %#x\n",
+ handle, dsp2cpu_cmd->pid);
+	/* Found fastrpc node */
+ dprintk(CVP_DSP, "%s fastrpc handle 0x%x unregistered\n",
+ if ((frpc_node->session_cnt == 0) || force_exit) {
+ dprintk(CVP_DSP, "%s session cnt %d, force %d\n",
+ __func__, frpc_node->session_cnt, (uint32_t)force_exit);
+ if (!dequeue_frpc_node(frpc_node))
+	/* Node not found */
+void cvp_dsp_send_debug_mask(void)
+ cmd.type = CPU2DSP_SET_DEBUG_LEVEL;
+ cmd.eva_dsp_debug_mask = me->debug_mask;
+ "%s: debug mask 0x%x\n",
+ __func__, cmd.eva_dsp_debug_mask);
+void cvp_dsp_send_hfi_queue(void)
+ struct cvp_dsp_rsp_msg rsp = {0};
+ dprintk(CVP_DSP, "Entering %s\n", __func__);
+ if (me->state != DSP_PROBED && me->state != DSP_INACTIVE) {
+ "%s: Either DSP is not probed or is not in proper state. me->state = %d\n",
+ __func__, me->state);
+ "%s: DSP probe Successful, going ahead with hyp_assign, me->state = %d\n",
+ if (me->state == DSP_PROBED) {
+ cvp_dsp_init_hfi_queue_hdr(device);
+ "%s: Done init of HFI queue headers\n", __func__);
+ if (rsp.ret == CPU2DSP_EUNSUPPORTED) {
+ dprintk(CVP_WARN, "%s unsupported cmd %d\n",
+ __func__, rsp.type);
+ if (rsp.ret == CPU2DSP_EFATAL || rsp.ret == CPU2DSP_EUNAVAILABLE) {
+ dprintk(CVP_ERR, "%s fatal error returned %d %d\n",
+ __func__, rsp.dsp_state, rsp.ret);
+ } else if (rsp.ret == CPU2DSP_EINVALSTATE) {
+ dprintk(CVP_ERR, "%s dsp invalid state %d\n",
+ __func__, rsp.dsp_state);
+ if (cvp_reinit_dsp()) {
+ dprintk(CVP_ERR, "%s reinit dsp fail\n", __func__);
+ dprintk(CVP_DSP, "%s: dsp initialized\n", __func__);
+ me->state = DSP_READY;
+/* 32- or 64-bit CPU-side pointer <-> two 32-bit DSP values. Dirty fix. */
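+/* Illustrative reassembly (assuming a 64-bit kernel):
+ *   inst = (void *)(((uint64_t)session_cpu_high << 32) | session_cpu_low);
+ * On a 32-bit kernel session_cpu_high is expected to be 0 and only
+ * session_cpu_low is used.
+ */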
+static void *get_inst_from_dsp(uint32_t session_cpu_high, uint32_t session_cpu_low)
+ struct msm_cvp_inst *sess_inst;
+ void *inst;
+ if ((session_cpu_high == 0) && (sizeof(void *) == BITPTRSIZE32)) {
+ inst = (void *)((uintptr_t)session_cpu_low);
+ } else if ((session_cpu_high != 0) && (sizeof(void *) == BITPTRSIZE64)) {
+ inst = (void *)((uintptr_t)(((uint64_t)session_cpu_high) << 32
+ | session_cpu_low));
+ "%s Invalid _cpu_high = 0x%x _cpu_low = 0x%x\n",
+ __func__, session_cpu_high, session_cpu_low);
+ list_for_each_entry(sess_inst, &core->instances, list) {
+ if (sess_inst->session_type == MSM_CVP_DSP) {
+ if (sess_inst == (struct msm_cvp_inst *)inst) {
+static void print_power(const struct eva_power_req *pwr_req)
+ if (pwr_req) {
+ dprintk(CVP_DSP, "Clock: Fdu %d Ica %d Od %d Mpu %d Fw %d",
+ pwr_req->clock_fdu, pwr_req->clock_ica,
+ pwr_req->clock_od, pwr_req->clock_mpu,
+ pwr_req->clock_fw);
+ dprintk(CVP_DSP, "OpClock: Fdu %d Ica %d Od %d Mpu %d Fw %d",
+ pwr_req->op_clock_fdu, pwr_req->op_clock_ica,
+ pwr_req->op_clock_od, pwr_req->op_clock_mpu,
+ pwr_req->op_clock_fw);
+ dprintk(CVP_DSP, "Actual Bw: Ddr %d, SysCache %d",
+ pwr_req->bw_ddr, pwr_req->bw_sys_cache);
+ dprintk(CVP_DSP, "OpBw: Ddr %d, SysCache %d",
+ pwr_req->op_bw_ddr, pwr_req->op_bw_sys_cache);
+void __dsp_cvp_sess_create(struct cvp_dsp_cmd_msg *cmd)
+ uint64_t inst_handle = 0;
+ uint32_t pid;
+ struct pid *pid_s = NULL;
+ struct fastrpc_device *frpc_device;
+ cmd->ret = 0;
+ "%s sess Type %d Mask %d Prio %d Sec %d hdl 0x%x\n",
+ __func__, dsp2cpu_cmd->session_type,
+ dsp2cpu_cmd->kernel_mask,
+ dsp2cpu_cmd->session_prio,
+ dsp2cpu_cmd->is_secure,
+ dsp2cpu_cmd->pid);
+ rc = eva_fastrpc_driver_register(dsp2cpu_cmd->pid);
+ dprintk(CVP_ERR, "%s Register fastrpc driver fail\n", __func__);
+ cmd->ret = -1;
+ frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
+ dprintk(CVP_WARN, "%s cannot get fastrpc node from pid %x\n",
+ __func__, dsp2cpu_cmd->pid);
+ goto fail_lookup;
+ if (!frpc_node->cvp_fastrpc_device) {
+ dprintk(CVP_WARN, "%s invalid fastrpc device from pid %x\n",
+ goto fail_pid;
+ rc = eva_fastrpc_dev_get_pid(frpc_device, &pid);
+ pid_s = find_get_pid(pid);
+ if (pid_s == NULL) {
+ dprintk(CVP_WARN, "%s incorrect pid %x\n", __func__, pid);
+ dprintk(CVP_DSP, "%s get pid_s 0x%x from hdl 0x%x\n", __func__,
+ pid_s, dsp2cpu_cmd->pid);
+ task = get_pid_task(pid_s, PIDTYPE_TGID);
+ if (!task) {
+ dprintk(CVP_WARN, "%s task doesn't exist\n", __func__);
+ inst = msm_cvp_open(MSM_CVP_DSP, task);
+ dprintk(CVP_ERR, "%s Failed create instance\n", __func__);
+ goto fail_msm_cvp_open;
+ inst->dsp_handle = dsp2cpu_cmd->pid;
+ inst->prop.kernel_mask = dsp2cpu_cmd->kernel_mask;
+ inst->prop.type = dsp2cpu_cmd->session_type;
+ inst->prop.priority = dsp2cpu_cmd->session_prio;
+ inst->prop.is_secure = dsp2cpu_cmd->is_secure;
+ inst->prop.dsp_mask = dsp2cpu_cmd->dsp_access_mask;
+ eva_fastrpc_driver_add_sess(frpc_node, inst);
+ dprintk(CVP_ERR, "Warning: send Session Create failed\n");
+ goto fail_get_session_info;
+ dprintk(CVP_DSP, "%s DSP Session Create done\n", __func__);
+ /* Get session id */
+ rc = msm_cvp_get_session_info(inst, &cmd->session_id);
+ dprintk(CVP_ERR, "Warning: get session index failed %d\n", rc);
+ inst_handle = (uint64_t)inst;
+ cmd->session_cpu_high = (uint32_t)((inst_handle & HIGH32) >> 32);
+ cmd->session_cpu_low = (uint32_t)(inst_handle & LOW32);
+ inst->task = task;
+ "%s CREATE_SESS id 0x%x, cpu_low 0x%x, cpu_high 0x%x, inst %pK, inst->session %pK\n",
+ __func__, cmd->session_id, cmd->session_cpu_low,
+ cmd->session_cpu_high, inst, inst->session);
+fail_get_session_info:
+ msm_cvp_close(inst);
+fail_msm_cvp_open:
+fail_pid:
+fail_lookup:
+ /* unregister fastrpc driver */
+ eva_fastrpc_driver_unregister(dsp2cpu_cmd->pid, false);
+void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
+ "%s sess id 0x%x low 0x%x high 0x%x, pid 0x%x\n",
+ __func__, dsp2cpu_cmd->session_id,
+ dsp2cpu_cmd->session_cpu_low,
+ dsp2cpu_cmd->session_cpu_high,
+ dprintk(CVP_ERR, "%s pid 0x%x not registered with fastrpc\n",
+ inst = (struct msm_cvp_inst *)get_inst_from_dsp(
+ dsp2cpu_cmd->session_cpu_low);
+ if (!inst || !is_cvp_inst_valid(inst)) {
+ dprintk(CVP_ERR, "%s incorrect session ID %llx\n", __func__, inst);
+ goto dsp_fail_delete;
+ dprintk(CVP_DSP, "%s DSP2CPU_DETELE_SESSION Done, nr_maps %d\n",
+ __func__, atomic_read(&nr_maps));
+dsp_fail_delete:
+void __dsp_cvp_power_req(struct cvp_dsp_cmd_msg *cmd)
+ "%s sess id 0x%x, low 0x%x, high 0x%x\n",
+ dsp2cpu_cmd->session_cpu_high);
+ goto dsp_fail_power_req;
+ print_power(&dsp2cpu_cmd->power_req);
+ inst->prop.cycles[HFI_HW_FDU] = dsp2cpu_cmd->power_req.clock_fdu;
+ inst->prop.cycles[HFI_HW_ICA] = dsp2cpu_cmd->power_req.clock_ica;
+ inst->prop.cycles[HFI_HW_OD] = dsp2cpu_cmd->power_req.clock_od;
+ inst->prop.cycles[HFI_HW_MPU] = dsp2cpu_cmd->power_req.clock_mpu;
+ inst->prop.fw_cycles = dsp2cpu_cmd->power_req.clock_fw;
+ inst->prop.ddr_bw = dsp2cpu_cmd->power_req.bw_ddr;
+ inst->prop.ddr_cache = dsp2cpu_cmd->power_req.bw_sys_cache;
+ inst->prop.op_cycles[HFI_HW_FDU] = dsp2cpu_cmd->power_req.op_clock_fdu;
+ inst->prop.op_cycles[HFI_HW_ICA] = dsp2cpu_cmd->power_req.op_clock_ica;
+ inst->prop.op_cycles[HFI_HW_OD] = dsp2cpu_cmd->power_req.op_clock_od;
+ inst->prop.op_cycles[HFI_HW_MPU] = dsp2cpu_cmd->power_req.op_clock_mpu;
+ inst->prop.fw_op_cycles = dsp2cpu_cmd->power_req.op_clock_fw;
+ inst->prop.ddr_op_bw = dsp2cpu_cmd->power_req.op_bw_ddr;
+ inst->prop.ddr_op_cache = dsp2cpu_cmd->power_req.op_bw_sys_cache;
+	 * May need to define more error types;
+	 * check the UMD implementation.
+ dprintk(CVP_ERR, "%s Failed update power\n", __func__);
+ dprintk(CVP_DSP, "%s DSP2CPU_POWER_REQUEST Done\n", __func__);
+dsp_fail_power_req:
+void __dsp_cvp_buf_register(struct cvp_dsp_cmd_msg *cmd)
+ struct eva_kmd_arg *kmd;
+ struct eva_kmd_buffer *kmd_buf;
+ "%s sess id 0x%x, low 0x%x, high 0x%x, pid 0x%x\n",
+ kmd = kzalloc(sizeof(*kmd), GFP_KERNEL);
+ if (!kmd) {
+ dprintk(CVP_ERR, "%s kzalloc failure\n", __func__);
+ kmd->type = EVA_KMD_REGISTER_BUFFER;
+ kmd_buf = (struct eva_kmd_buffer *)&(kmd->data.regbuf);
+ kmd_buf->type = EVA_KMD_BUFTYPE_INPUT;
+ kmd_buf->index = dsp2cpu_cmd->sbuf.index;
+ kmd_buf->fd = dsp2cpu_cmd->sbuf.fd;
+ kmd_buf->size = dsp2cpu_cmd->sbuf.size;
+ kmd_buf->offset = dsp2cpu_cmd->sbuf.offset;
+ kmd_buf->pixelformat = 0;
+ kmd_buf->flags = EVA_KMD_FLAG_UNSECURE;
+ rc = msm_cvp_register_buffer(inst, kmd_buf);
+ dprintk(CVP_ERR, "%s Failed to register buffer\n", __func__);
+ goto dsp_fail_buf_reg;
+ dprintk(CVP_DSP, "%s register buffer done\n", __func__);
+ cmd->sbuf.iova = kmd_buf->reserved[0];
+ cmd->sbuf.size = kmd_buf->size;
+ cmd->sbuf.fd = kmd_buf->fd;
+ cmd->sbuf.index = kmd_buf->index;
+ cmd->sbuf.offset = kmd_buf->offset;
+ cmd->sbuf.fd, cmd->sbuf.iova);
+dsp_fail_buf_reg:
+ kfree(kmd);
+void __dsp_cvp_buf_deregister(struct cvp_dsp_cmd_msg *cmd)
+ "%s : sess id 0x%x, low 0x%x, high 0x%x, hdl 0x%x\n",
+ kmd->type = EVA_KMD_UNREGISTER_BUFFER;
+ kmd_buf->type = EVA_KMD_UNREGISTER_BUFFER;
+ rc = msm_cvp_unregister_buffer(inst, kmd_buf);
+ dprintk(CVP_ERR, "%s Failed to deregister buffer\n", __func__);
+ goto fail_dsp_buf_dereg;
+ dprintk(CVP_DSP, "%s deregister buffer done\n", __func__);
+fail_dsp_buf_dereg:
+void __dsp_cvp_mem_alloc(struct cvp_dsp_cmd_msg *cmd)
+ uint64_t v_dsp_addr = 0;
+ "%s sess id 0x%x, low 0x%x, high 0x%x, hdl 0x%x\n",
+ dprintk(CVP_ERR, "%s Failed to find fastrpc node 0x%x\n",
+ goto fail_fastrpc_node;
+ goto fail_kzalloc_buf;
+ rc = cvp_allocate_dsp_bufs(inst, buf,
+ dsp2cpu_cmd->sbuf.size,
+ dsp2cpu_cmd->sbuf.type);
+ goto fail_allocate_dsp_buf;
+ rc = eva_fastrpc_dev_map_dma(frpc_device, buf,
+ dsp2cpu_cmd->sbuf.dsp_remote_map,
+ &v_dsp_addr);
+ dprintk(CVP_ERR, "%s Failed to map buffer 0x%x\n", __func__,
+ goto fail_fastrpc_dev_map_dma;
+ list_add_tail(&buf->list, &inst->cvpdspbufs.list);
+ dprintk(CVP_DSP, "%s allocate buffer done, addr 0x%llx\n",
+ __func__, v_dsp_addr);
+ cmd->sbuf.size = buf->smem->size;
+ cmd->sbuf.fd = buf->fd;
+ cmd->sbuf.offset = 0;
+ cmd->sbuf.iova = buf->smem->device_addr;
+ cmd->sbuf.v_dsp_addr = v_dsp_addr;
+ dprintk(CVP_DSP, "%s: size %d, iova 0x%x, v_dsp_addr 0x%llx\n",
+ __func__, cmd->sbuf.size, cmd->sbuf.iova, cmd->sbuf.v_dsp_addr);
+ dprintk(CVP_DSP, "%s: DSP2CPU_session_id 0x%x, smem_fd 0x%x, smem_refcount %d\n",
+ __func__, dsp2cpu_cmd->session_id, buf->smem->fd, buf->smem->refcount);
+fail_fastrpc_dev_map_dma:
+ cvp_release_dsp_buffers(inst, buf);
+fail_allocate_dsp_buf:
+fail_kzalloc_buf:
+fail_fastrpc_node:
+void __dsp_cvp_mem_free(struct cvp_dsp_cmd_msg *cmd)
+ "%s sess 0x%x, low 0x%x, high 0x%x, hnl 0x%x, iova 0x%x, fd 0x%x\n",
+ dsp2cpu_cmd->pid, dsp2cpu_cmd->sbuf.iova,
+ dsp2cpu_cmd->sbuf.fd);
+ dprintk(CVP_ERR, "%s Failed to get inst\n",
+ list_for_each_safe(ptr, next, &buf_list->list) {
+ /* Verify with device addr */
+ if ((buf->smem->device_addr == dsp2cpu_cmd->sbuf.iova) &&
+ (buf->fd == dsp2cpu_cmd->sbuf.fd)) {
+ dprintk(CVP_DSP, "fd in list 0x%x, fd from dsp 0x%x\n",
+ buf->fd, dsp2cpu_cmd->sbuf.fd);
+ goto fail_fastrpc_dev_unmap_dma;
+ goto fail_release_buf;
+fail_release_buf:
+fail_fastrpc_dev_unmap_dma:
+void __dsp_cvp_sess_start(struct cvp_dsp_cmd_msg *cmd)
+ if (sq->state == QUEUE_START) {
+ dprintk(CVP_WARN, "DSP double started session %llx\n", inst);
+ rc = msm_cvp_session_start(inst, (struct eva_kmd_arg *)NULL);
+ dprintk(CVP_ERR, "%s Failed to start session %llx\n", __func__, inst);
+ dprintk(CVP_DSP, "%s session started\n", __func__);
+void __dsp_cvp_sess_stop(struct cvp_dsp_cmd_msg *cmd)
+ "%s sess id 0x%x low 0x%x high 0x%x, pid 0x%x, inst_kref_refcount 0x%x\n",
+ dsp2cpu_cmd->pid, kref_read(&inst->kref));
+ rc = msm_cvp_session_stop(inst, (struct eva_kmd_arg *)NULL);
+ dprintk(CVP_ERR, "%s Failed to stop session\n", __func__);
+ dprintk(CVP_DSP, "%s session stoppd\n", __func__);
+static int cvp_dsp_thread(void *data)
+ int rc = 0, old_state;
+ dprintk(CVP_ERR, "%s: Failed to find core\n", __func__);
+ rc = wait_for_completion_interruptible(
+ &me->completions[CPU2DSP_MAX_CMD]);
+ if (me->state == DSP_UNINIT)
+	/* Set the cmd to 0 to avoid sending previous session values in case the command fails */
+ memset(&cmd, 0, sizeof(struct cvp_dsp_cmd_msg));
+ cmd.type = me->pending_dsp2cpu_cmd.type;
+ if (rc == -ERESTARTSYS) {
+ dprintk(CVP_WARN, "%s received interrupt signal\n", __func__);
+ if (me->state == DSP_UNINIT) {
+ /* DSP SSR may have happened */
+ switch (me->pending_dsp2cpu_cmd.type) {
+ case DSP2CPU_POWERON:
+ if (me->state == DSP_READY) {
+ cmd.ret = 0;
+ old_state = me->state;
+ dprintk(CVP_WARN, "%s Failed to resume cvp\n",
+ me->state = old_state;
+ cmd.ret = 1;
+ case DSP2CPU_POWEROFF:
+ case DSP2CPU_CREATE_SESSION:
+ __dsp_cvp_sess_create(&cmd);
+ case DSP2CPU_DETELE_SESSION:
+ __dsp_cvp_sess_delete(&cmd);
+ case DSP2CPU_POWER_REQUEST:
+ __dsp_cvp_power_req(&cmd);
+ case DSP2CPU_REGISTER_BUFFER:
+ __dsp_cvp_buf_register(&cmd);
+ case DSP2CPU_DEREGISTER_BUFFER:
+ __dsp_cvp_buf_deregister(&cmd);
+ case DSP2CPU_MEM_ALLOC:
+ __dsp_cvp_mem_alloc(&cmd);
+ case DSP2CPU_MEM_FREE:
+ __dsp_cvp_mem_free(&cmd);
+ case DSP2CPU_START_SESSION:
+ __dsp_cvp_sess_start(&cmd);
+ case DSP2CPU_STOP_SESSION:
+ __dsp_cvp_sess_stop(&cmd);
+ dprintk(CVP_ERR, "unrecognaized dsp cmds: %d\n",
+ me->pending_dsp2cpu_cmd.type);
+ me->pending_dsp2cpu_cmd.type = CVP_INVALID_RPMSG_TYPE;
+	/* Respond to the DSP */
+ rc = cvp_dsp_send_cmd(&cmd, sizeof(struct cvp_dsp_cmd_msg));
+ "%s: cvp_dsp_send_cmd failed rc = %d cmd type=%d\n",
+ __func__, rc, cmd.type);
+ dprintk(CVP_DBG, "dsp thread exit\n");
+int cvp_dsp_device_init(void)
+ char name[CVP_FASTRPC_DRIVER_NAME_SIZE] = "qcom,fastcv0\0";
+ add_va_node_to_list(CVP_DBG_DUMP, &gfa_cv, sizeof(struct cvp_dsp_apps),
+ "cvp_dsp_apps-gfa_cv", false);
+ mutex_init(&me->tx_lock);
+ mutex_init(&me->rx_lock);
+ for (i = 0; i <= CPU2DSP_MAX_CMD; i++)
+ init_completion(&me->completions[i]);
+ INIT_MSM_CVP_LIST(&me->fastrpc_driver_list);
+ mutex_init(&me->driver_name_lock);
+ me->cvp_fastrpc_name[i].status = DRIVER_NAME_AVAILABLE;
+ snprintf(me->cvp_fastrpc_name[i].name, sizeof(name), name);
+ name[11]++;
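+	/* Bumps the trailing digit so each slot gets a unique driver name:
+	 * "qcom,fastcv0", "qcom,fastcv1", ... */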
+ rc = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
+ "%s : register_rpmsg_driver failed rc = %d\n",
+ goto register_bail;
+ snprintf(tname, sizeof(tname), "cvp-dsp-thread");
+ me->dsp_thread = kthread_run(cvp_dsp_thread, me, tname);
+	/* kthread_run() returns ERR_PTR() on failure, not NULL */
+	if (IS_ERR_OR_NULL(me->dsp_thread)) {
+register_bail:
+void cvp_dsp_device_exit(void)
+ DEINIT_MSM_CVP_LIST(&me->fastrpc_driver_list);
+ complete_all(&me->completions[i]);
+ mutex_destroy(&me->tx_lock);
+ mutex_destroy(&me->rx_lock);
+ mutex_destroy(&me->driver_name_lock);
+ unregister_rpmsg_driver(&cvp_dsp_rpmsg_client);
@@ -0,0 +1,315 @@
+#ifndef MSM_CVP_DSP_H
+#define MSM_CVP_DSP_H
+#include <fastrpc.h>
+struct fastrpc_device {
+ int handle;
+enum fastrpc_driver_status {
+ FASTRPC_CVP_B,
+enum fastrpc_driver_invoke_nums {
+ FASTRPC_DEV_MAP_DMA = 1,
+ FASTRPC_DEV_UNMAP_DMA,
+ FASTRPC_DEV_GET_HLOS_PID,
+struct fastrpc_driver {
+ struct device_driver driver;
+ int (*probe)(struct fastrpc_device *dev);
+ int (*callback)(struct fastrpc_device *dev,
+ enum fastrpc_driver_status status);
+#define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
+#define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
+#define VMID_CDSP_Q6 (30)
+#define HLOS_VM_NUM 1
+#define DSP_VM_NUM 2
+#define CVP_DSP_MAX_RESERVED 5
+#define CVP_DSP2CPU_RESERVED 8
+#define CVP_DSP_RESPONSE_TIMEOUT 1000
+#define CVP_INVALID_RPMSG_TYPE 0xBADDFACE
+#define MAX_FRAME_BUF_NUM 16
+#define BITPTRSIZE32 (4)
+#define BITPTRSIZE64 (8)
+#define HIGH32 (0xFFFFFFFF00000000LL)
+#define LOW32 (0xFFFFFFFFLL)
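+/* Illustrative use (mirrors __dsp_cvp_sess_create): split a kernel pointer
+ * into the two 32-bit halves handed to the DSP, e.g.
+ *   high = (uint32_t)((handle & HIGH32) >> 32);
+ *   low  = (uint32_t)(handle & LOW32);
+ */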
+#define CVP_FASTRPC_DRIVER_NAME_SIZE 16
+/* Supports up to 8 DSP sessions in 8 processes */
+#define MAX_DSP_SESSION_NUM (8)
+#define MAX_FASTRPC_DRIVER_NUM (MAX_DSP_SESSION_NUM)
+int cvp_dsp_device_init(void);
+void cvp_dsp_device_exit(void);
+void cvp_dsp_send_hfi_queue(void);
+void cvp_dsp_init_hfi_queue_hdr(struct iris_hfi_device *device);
+enum CPU2DSP_STATUS {
+ CPU2DSP_SUCCESS = 0,
+ CPU2DSP_EFAIL = 1,
+ CPU2DSP_EFATAL = 2,
+ CPU2DSP_EUNAVAILABLE = 3,
+ CPU2DSP_EINVALSTATE = 4,
+ CPU2DSP_EUNSUPPORTED = 5,
+enum CVP_DSP_COMMAND {
+ CPU2DSP_SEND_HFI_QUEUE = 0,
+ CPU2DSP_SUSPEND = 1,
+ CPU2DSP_RESUME = 2,
+ CPU2DSP_SHUTDOWN = 3,
+ CPU2DSP_REGISTER_BUFFER = 4,
+ CPU2DSP_DEREGISTER_BUFFER = 5,
+ CPU2DSP_INIT = 6,
+ CPU2DSP_SET_DEBUG_LEVEL = 7,
+ CPU2DSP_MAX_CMD = 8,
+ DSP2CPU_POWERON = 11,
+ DSP2CPU_POWEROFF = 12,
+ DSP2CPU_CREATE_SESSION = 13,
+ DSP2CPU_DETELE_SESSION = 14,
+ DSP2CPU_POWER_REQUEST = 15,
+ DSP2CPU_POWER_CANCEL = 16,
+ DSP2CPU_REGISTER_BUFFER = 17,
+ DSP2CPU_DEREGISTER_BUFFER = 18,
+ DSP2CPU_MEM_ALLOC = 19,
+ DSP2CPU_MEM_FREE = 20,
+ DSP2CPU_START_SESSION = 21,
+ DSP2CPU_STOP_SESSION = 22,
+ CVP_DSP_MAX_CMD = 23,
+struct eva_power_req {
+ uint32_t clock_fdu;
+ uint32_t clock_ica;
+ uint32_t clock_od;
+ uint32_t clock_mpu;
+ uint32_t clock_fw;
+ uint32_t bw_ddr;
+ uint32_t bw_sys_cache;
+ uint32_t op_clock_fdu;
+ uint32_t op_clock_ica;
+ uint32_t op_clock_od;
+ uint32_t op_clock_mpu;
+ uint32_t op_clock_fw;
+ uint32_t op_bw_ddr;
+ uint32_t op_bw_sys_cache;
+struct eva_mem_remote {
+ uint32_t type;
+ uint32_t fd;
+ uint32_t offset;
+ uint32_t index;
+ uint32_t iova;
+ uint32_t dsp_remote_map;
+ uint64_t v_dsp_addr;
+ * command: defined as a packet initiated from one party.
+ * message: defined as a packet sent as response to a command
+ * cvp_dsp_cmd_msg contains
+ * the message sent from CPU to DSP
+ * or
+ * the command sent from CPU to DSP
+struct cvp_dsp_cmd_msg {
+ int32_t ret;
+ uint64_t msg_ptr;
+ uint32_t msg_ptr_len;
+ uint32_t buff_fd_iova;
+ uint32_t buff_index;
+ uint32_t buff_size;
+ uint32_t session_id;
+ int32_t ddr_type;
+ uint32_t buff_fd;
+ uint32_t buff_offset;
+ uint32_t buff_fd_size;
+ uint32_t eva_dsp_debug_mask;
+ /* Create Session */
+ uint32_t session_cpu_low;
+ uint32_t session_cpu_high;
+ struct eva_mem_remote sbuf;
+ uint32_t reserved1;
+ uint32_t reserved2;
+/* cvp_dsp_rsp_msg contains the message sent from DSP to CPU */
+struct cvp_dsp_rsp_msg {
+ uint32_t dsp_state;
+ uint32_t reserved[CVP_DSP_MAX_RESERVED - 1];
+/* cvp_dsp2cpu_cmd contains the command sent from DSP to CPU */
+struct cvp_dsp2cpu_cmd {
+ uint32_t ver;
+ uint32_t len;
+ uint32_t session_type;
+ uint32_t kernel_mask;
+ uint32_t session_prio;
+ uint32_t is_secure;
+ uint32_t dsp_access_mask;
+ int32_t pid;
+ struct eva_power_req power_req;
+ uint32_t data[CVP_DSP2CPU_RESERVED];
+struct driver_name {
+ uint32_t status;
+ char name[CVP_FASTRPC_DRIVER_NAME_SIZE];
+enum DRIVER_NAME_STATUS {
+ DRIVER_NAME_INVALID = 0,
+ DRIVER_NAME_AVAILABLE = 1,
+ DRIVER_NAME_USED = 2,
+struct cvp_dsp_fastrpc_driver_entry {
+	uint32_t handle; /* handle is not a PID */
+ uint32_t session_cnt;
+ uint32_t driver_name_idx;
+ struct fastrpc_driver cvp_fastrpc_driver;
+ struct fastrpc_device *cvp_fastrpc_device;
+ struct completion fastrpc_probe_completion;
+ /* all dsp sessions list */
+ struct msm_cvp_list dsp_sessions;
+struct cvp_dsp_apps {
+ * tx_lock for sending CPU2DSP cmds or msgs
+ * and dsp state change
+ struct mutex tx_lock;
+ /* rx_lock for receiving DSP2CPU cmds or msgs */
+ struct mutex rx_lock;
+ struct mutex driver_name_lock;
+ struct rpmsg_device *chan;
+ uint32_t state;
+ uint32_t debug_mask;
+ bool hyp_assigned;
+ struct completion completions[CPU2DSP_MAX_CMD + 1];
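+	/* The extra slot (index CPU2DSP_MAX_CMD) appears to be the completion
+	 * cvp_dsp_thread waits on for incoming DSP2CPU commands. */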
+ struct cvp_dsp2cpu_cmd pending_dsp2cpu_cmd;
+ struct cvp_dsp_rsp_msg pending_dsp2cpu_rsp;
+ struct task_struct *dsp_thread;
+	/* DSP buffer mapping; dma_buf file operations pointer */
+ const struct file_operations *dmabuf_f_op;
+ uint32_t buf_num;
+ struct msm_cvp_list fastrpc_driver_list;
+ struct driver_name cvp_fastrpc_name[MAX_FASTRPC_DRIVER_NUM];
+#define EVA_TRACE_MAX_SESSION_NUM 16
+#define EVA_TRACE_MAX_INSTANCE_NUM 6
+#define EVA_TRACE_MAX_BUF_NUM 256
+#define CONFIG_SIZE_IN_BYTES 2048
+#define CONFIG_SIZE_IN_WORDS (CONFIG_SIZE_IN_BYTES >> 2)
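+/* 2048 bytes >> 2 = 512 32-bit words per saved config packet */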
+// iova is eva_dsp_buf->iova
+// pkt_type is the frame packet type using the buffer
+// buf_idx is the index of the buffer in the frame packet
+// transaction_id is the transaction id of the frame packet
+struct cvp_dsp_trace_buf {
+// Saves the config packet for each instance
+struct cvp_dsp_trace_instance {
+ u32 feature_type;
+ u32 config_pkt[CONFIG_SIZE_IN_WORDS];
+struct cvp_dsp_trace_session {
+ u32 buf_cnt;
+ u32 inst_cnt;
+ struct cvp_dsp_trace_instance instance[EVA_TRACE_MAX_INSTANCE_NUM];
+ struct cvp_dsp_trace_buf buf[EVA_TRACE_MAX_BUF_NUM];
+struct cvp_dsp_trace {
+ struct cvp_dsp_trace_session sessions[EVA_TRACE_MAX_SESSION_NUM];
+extern struct cvp_dsp_apps gfa_cv;
+ * API for CVP driver to suspend CVP session during
+ * power collapse
+int cvp_dsp_suspend(bool force);
+ * API for CVP driver to resume CVP session during
+int cvp_dsp_resume(void);
+ * API for CVP driver to shutdown CVP session during
+ * cvp subsystem error.
+int cvp_dsp_shutdown(void);
+int cvp_dsp_fastrpc_unmap(uint32_t handle, struct cvp_internal_buf *buf);
+int cvp_dsp_del_sess(uint32_t handle, struct msm_cvp_inst *inst);
+void cvp_dsp_send_debug_mask(void);
+#endif // MSM_CVP_DSP_H
@@ -0,0 +1,375 @@
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+#if !defined(_MSM_CVP_EVENTS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_CVP_EVENTS_H_
+#include <linux/tracepoint.h>
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_cvp
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE msm_cvp_events
+// Since Chrome parses the "tracing_mark_write" event by default,
+// we can re-use it to display our own events in Chrome.
+// Enable it with the command below:
+// adb shell "echo 1 > /sys/kernel/tracing/events/msm_cvp/tracing_mark_write/enable"
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+#define CVPKERNEL_ATRACE_END(name) \
+ trace_tracing_mark_write(current->tgid, name, 0)
+#define CVPKERNEL_ATRACE_BEGIN(name) \
+ trace_tracing_mark_write(current->tgid, name, 1)
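+// Illustrative usage (the label below is hypothetical): wrap a region so
+// Chrome renders a begin/end slice:
+//   CVPKERNEL_ATRACE_BEGIN("cvp_frame");
+//   ... work ...
+//   CVPKERNEL_ATRACE_END("cvp_frame");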
+DECLARE_EVENT_CLASS(msm_v4l2_cvp,
+ TP_PROTO(char *dummy),
+ TP_ARGS(dummy),
+ __field(char *, dummy)
+ __entry->dummy = dummy;
+ TP_printk("%s", __entry->dummy)
+);
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_open_start,
+ TP_ARGS(dummy)
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_open_end,
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_close_start,
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_close_end,
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_fw_load_start,
+DEFINE_EVENT(msm_v4l2_cvp, msm_v4l2_cvp_fw_load_end,
+DECLARE_EVENT_CLASS(msm_cvp_common,
+ TP_PROTO(void *instp, int old_state, int new_state),
+ TP_ARGS(instp, old_state, new_state),
+ __field(void *, instp)
+ __field(int, old_state)
+ __field(int, new_state)
+ __entry->instp = instp;
+ __entry->old_state = old_state;
+ __entry->new_state = new_state;
+ TP_printk("Moved inst: %p from 0x%x to 0x%x",
+ __entry->instp,
+ __entry->old_state,
+ __entry->new_state)
+DEFINE_EVENT(msm_cvp_common, msm_cvp_common_state_change,
+ TP_ARGS(instp, old_state, new_state)
+DECLARE_EVENT_CLASS(cvp_venus_hfi_var,
+ TP_PROTO(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start, u32 cp_nonpixel_size),
+ TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size),
+ __field(u32, cp_start)
+ __field(u32, cp_size)
+ __field(u32, cp_nonpixel_start)
+ __field(u32, cp_nonpixel_size)
+ __entry->cp_start = cp_start;
+ __entry->cp_size = cp_size;
+ __entry->cp_nonpixel_start = cp_nonpixel_start;
+ __entry->cp_nonpixel_size = cp_nonpixel_size;
+ TP_printk(
+ "TZBSP_MEM_PROTECT_VIDEO_VAR done, cp_start : 0x%x, cp_size : 0x%x, cp_nonpixel_start : 0x%x, cp_nonpixel_size : 0x%x",
+ __entry->cp_start,
+ __entry->cp_size,
+ __entry->cp_nonpixel_start,
+ __entry->cp_nonpixel_size)
+DEFINE_EVENT(cvp_venus_hfi_var, cvp_venus_hfi_var_done,
+ TP_ARGS(cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size)
+DECLARE_EVENT_CLASS(msm_v4l2_cvp_buffer_events,
+ TP_PROTO(char *event_type, u32 device_addr, int64_t timestamp,
+ u32 alloc_len, u32 filled_len, u32 offset),
+ TP_ARGS(event_type, device_addr, timestamp, alloc_len,
+ filled_len, offset),
+ __field(char *, event_type)
+ __field(u32, device_addr)
+ __field(int64_t, timestamp)
+ __field(u32, alloc_len)
+ __field(u32, filled_len)
+ __field(u32, offset)
+ __entry->event_type = event_type;
+ __entry->device_addr = device_addr;
+ __entry->timestamp = timestamp;
+ __entry->alloc_len = alloc_len;
+ __entry->filled_len = filled_len;
+ __entry->offset = offset;
+ "%s, device_addr : 0x%x, timestamp : %lld, alloc_len : 0x%x, filled_len : 0x%x, offset : 0x%x",
+ __entry->event_type,
+ __entry->device_addr,
+ __entry->timestamp,
+ __entry->alloc_len,
+ __entry->filled_len,
+ __entry->offset)
+DEFINE_EVENT(msm_v4l2_cvp_buffer_events, msm_v4l2_cvp_buffer_event_start,
+ filled_len, offset)
+DEFINE_EVENT(msm_v4l2_cvp_buffer_events, msm_v4l2_cvp_buffer_event_end,
+DECLARE_EVENT_CLASS(msm_cvp_smem_buffer_dma_ops,
+ TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
+ size_t size, u32 align, u32 flags, int map_kernel),
+ TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
+ flags, map_kernel),
+ __field(char *, buffer_op)
+ __field(u32, buffer_type)
+ __field(u32, heap_mask)
+ __field(u32, size)
+ __field(u32, align)
+ __field(u32, flags)
+ __field(int, map_kernel)
+ __entry->buffer_op = buffer_op;
+ __entry->buffer_type = buffer_type;
+ __entry->heap_mask = heap_mask;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->flags = flags;
+ __entry->map_kernel = map_kernel;
+ "%s, buffer_type : 0x%x, heap_mask : 0x%x, size : 0x%x, align : 0x%x, flags : 0x%x, map_kernel : %d",
+ __entry->buffer_op,
+ __entry->buffer_type,
+ __entry->heap_mask,
+ __entry->size,
+ __entry->align,
+ __entry->flags,
+ __entry->map_kernel)
+DEFINE_EVENT(msm_cvp_smem_buffer_dma_ops, msm_cvp_smem_buffer_dma_op_start,
+ flags, map_kernel)
+DEFINE_EVENT(msm_cvp_smem_buffer_dma_ops, msm_cvp_smem_buffer_dma_op_end,
+DECLARE_EVENT_CLASS(msm_cvp_smem_buffer_iommu_ops,
+ TP_PROTO(char *buffer_op, int domain_num, int partition_num,
+ unsigned long align, unsigned long iova,
+ unsigned long buffer_size),
+ TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size),
+ __field(int, domain_num)
+ __field(int, partition_num)
+ __field(unsigned long, align)
+ __field(unsigned long, iova)
+ __field(unsigned long, buffer_size)
+ __entry->domain_num = domain_num;
+ __entry->partition_num = partition_num;
+ __entry->iova = iova;
+ __entry->buffer_size = buffer_size;
+ "%s, domain : %d, partition : %d, align : %lx, iova : 0x%lx, buffer_size=%lx",
+ __entry->domain_num,
+ __entry->partition_num,
+ __entry->iova,
+ __entry->buffer_size)
+DEFINE_EVENT(msm_cvp_smem_buffer_iommu_ops, msm_cvp_smem_buffer_iommu_op_start,
+ TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size)
+DEFINE_EVENT(msm_cvp_smem_buffer_iommu_ops, msm_cvp_smem_buffer_iommu_op_end,
+DECLARE_EVENT_CLASS(msm_cvp_perf,
+ TP_PROTO(const char *name, unsigned long value),
+ TP_ARGS(name, value),
+ __field(const char *, name)
+ __field(unsigned long, value)
+ __entry->name = name;
+ __entry->value = value;
+ TP_printk("%s %lu", __entry->name, __entry->value)
+DEFINE_EVENT(msm_cvp_perf, msm_cvp_perf_clock_scale,
+ TP_PROTO(const char *clock_name, unsigned long frequency),
+ TP_ARGS(clock_name, frequency)
+DEFINE_EVENT(msm_cvp_perf, msm_cvp_perf_bus_vote,
+ TP_PROTO(const char *governor_mode, unsigned long ab),
+ TP_ARGS(governor_mode, ab)
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
@@ -0,0 +1,408 @@
+#ifndef _MSM_CVP_INTERNAL_H_
+#define _MSM_CVP_INTERNAL_H_
+#include <linux/atomic.h>
+#include <linux/time.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/interconnect.h>
+#include <linux/kref.h>
+#include <linux/cdev.h>
+#include <linux/kthread.h>
+#define MAX_SUPPORTED_INSTANCES 16
+#define MAX_DEBUGFS_NAME 50
+#define MAX_DSP_INIT_ATTEMPTS 16
+#define FENCE_WAIT_SIGNAL_TIMEOUT 100
+#define FENCE_WAIT_SIGNAL_RETRY_TIMES 20
+#define FENCE_BIT (1ULL << 63)
+#define FENCE_DMM_ICA_ENABLED_IDX 0
+#define FENCE_DMM_DS_IDX 1
+#define FENCE_DMM_OUTPUT_IDX 7
+#define SYS_MSG_START HAL_SYS_INIT_DONE
+#define SYS_MSG_END HAL_SYS_ERROR
+#define SESSION_MSG_START HAL_SESSION_EVENT_CHANGE
+#define SESSION_MSG_END HAL_SESSION_ERROR
+#define SYS_MSG_INDEX(__msg) (__msg - SYS_MSG_START)
+#define SESSION_MSG_INDEX(__msg) (__msg - SESSION_MSG_START)
+#define ARP_BUF_SIZE 0x300000
+#define CVP_RT_PRIO_THRESHOLD 1
+enum cvp_core_state {
+ CVP_CORE_UNINIT = 0,
+ CVP_CORE_INIT,
+ CVP_CORE_INIT_DONE,
+enum instance_state {
+ MSM_CVP_CORE_UNINIT_DONE = 0x0001,
+ MSM_CVP_CORE_INIT,
+ MSM_CVP_CORE_INIT_DONE,
+ MSM_CVP_OPEN,
+ MSM_CVP_OPEN_DONE,
+ MSM_CVP_CLOSE,
+ MSM_CVP_CLOSE_DONE,
+ MSM_CVP_CORE_UNINIT,
+ MSM_CVP_CORE_INVALID
+enum dsp_state {
+ DSP_INVALID,
+ DSP_UNINIT,
+ DSP_PROBED,
+ DSP_READY,
+ DSP_SUSPEND,
+ DSP_INACTIVE,
+struct msm_cvp_common_data {
+ char key[128];
+ int value;
+enum sku_version {
+ SKU_VERSION_0 = 0,
+ SKU_VERSION_1,
+ SKU_VERSION_2,
+enum vpu_version {
+ VPU_VERSION_4 = 1,
+ VPU_VERSION_5,
+struct msm_cvp_ubwc_config_data {
+struct msm_cvp_qos_setting {
+ u32 axi_qos;
+ u32 prioritylut_low;
+ u32 prioritylut_high;
+ u32 urgency_low;
+ u32 urgency_low_ro;
+ u32 dangerlut_low;
+ u32 safelut_low;
+struct msm_cvp_platform_data {
+ struct msm_cvp_common_data *common_data;
+ unsigned int common_data_length;
+ unsigned int sku_version;
+ uint32_t vpu_ver;
+ unsigned int vm_id; /* pvm: 1; tvm: 2 */
+ struct msm_cvp_ubwc_config_data *ubwc_config;
+ struct msm_cvp_qos_setting *noc_qos;
+struct cvp_kmem_cache {
+ struct kmem_cache *cache;
+ atomic_t nr_objs;
+struct msm_cvp_drv {
+ struct msm_cvp_core *cvp_core;
+ struct dentry *debugfs_root;
+ int thermal_level;
+ u32 sku_version;
+ struct cvp_kmem_cache msg_cache;
+ struct cvp_kmem_cache frame_cache;
+ struct cvp_kmem_cache buf_cache;
+ struct cvp_kmem_cache smem_cache;
+ char fw_version[CVP_VERSION_LENGTH];
+enum profiling_points {
+ SYS_INIT = 0,
+ SESSION_INIT,
+ LOAD_RESOURCES,
+ FRAME_PROCESSING,
+ FW_IDLE,
+ MAX_PROFILING_POINTS,
+struct cvp_clock_data {
+ int buffer_counter;
+ int load;
+ int load_low;
+ int load_norm;
+ int load_high;
+ int min_threshold;
+ int max_threshold;
+ unsigned long bitrate;
+ unsigned long min_freq;
+ unsigned long curr_freq;
+ u32 operating_rate;
+ bool low_latency_mode;
+ bool turbo_mode;
+struct cvp_profile_data {
+ int start;
+ int stop;
+ int cumulative;
+ char name[64];
+ int sampling;
+ int average;
+struct msm_cvp_debug {
+ struct cvp_profile_data pdata[MAX_PROFILING_POINTS];
+ int profile;
+ int samples;
+enum msm_cvp_modes {
+ CVP_SECURE = BIT(0),
+ CVP_TURBO = BIT(1),
+ CVP_THUMBNAIL = BIT(2),
+ CVP_LOW_POWER = BIT(3),
+ CVP_REALTIME = BIT(4),
+#define MAX_NUM_MSGS_PER_SESSION 128
+struct cvp_session_msg {
+ struct list_head node;
+ struct cvp_hfi_msg_session_hdr_ext pkt;
+struct cvp_session_queue {
+ spinlock_t lock;
+ unsigned int msg_count;
+ struct list_head msgs;
+ wait_queue_head_t wq;
+struct cvp_session_prop {
+ u32 kernel_mask;
+ u32 priority;
+ u32 dsp_mask;
+ u32 fthread_nr;
+ u32 cycles[HFI_MAX_HW_THREADS];
+ u32 fw_cycles;
+ u32 op_cycles[HFI_MAX_HW_THREADS];
+ u32 fw_op_cycles;
+ u32 ddr_op_bw;
+ u32 ddr_cache;
+ u32 ddr_op_cache;
+ u32 fps[HFI_MAX_HW_THREADS];
+enum cvp_event_t {
+ CVP_NO_EVENT,
+ CVP_SSR_EVENT = 1,
+ CVP_SYS_ERROR_EVENT,
+ CVP_MAX_CLIENTS_EVENT,
+ CVP_HW_UNSUPPORTED_EVENT,
+ CVP_INVALID_EVENT,
+ CVP_DUMP_EVENT,
+struct cvp_session_event {
+ enum cvp_event_t event;
+#define MAX_ENTRIES 64
+struct smem_data {
+ u32 refcount;
+struct cvp_buf_data {
+struct inst_snapshot {
+ void *session;
+ u32 smem_index;
+ u32 dsp_index;
+ u32 persist_index;
+ struct smem_data smem_log[MAX_ENTRIES];
+ struct cvp_buf_data dsp_buf_log[MAX_ENTRIES];
+ struct cvp_buf_data persist_buf_log[MAX_ENTRIES];
+struct cvp_noc_log {
+ u32 used;
+ u32 err_ctrl_swid_low;
+ u32 err_ctrl_swid_high;
+ u32 err_ctrl_mainctl_low;
+ u32 err_ctrl_errvld_low;
+ u32 err_ctrl_errclr_low;
+ u32 err_ctrl_errlog0_low;
+ u32 err_ctrl_errlog0_high;
+ u32 err_ctrl_errlog1_low;
+ u32 err_ctrl_errlog1_high;
+ u32 err_ctrl_errlog2_low;
+ u32 err_ctrl_errlog2_high;
+ u32 err_ctrl_errlog3_low;
+ u32 err_ctrl_errlog3_high;
+ u32 err_core_swid_low;
+ u32 err_core_swid_high;
+ u32 err_core_mainctl_low;
+ u32 err_core_errvld_low;
+ u32 err_core_errclr_low;
+ u32 err_core_errlog0_low;
+ u32 err_core_errlog0_high;
+ u32 err_core_errlog1_low;
+ u32 err_core_errlog1_high;
+ u32 err_core_errlog2_low;
+ u32 err_core_errlog2_high;
+ u32 err_core_errlog3_low;
+ u32 err_core_errlog3_high;
+ u32 arp_test_bus[16];
+ u32 dma_test_bus[512];
+struct cvp_debug_log {
+ struct cvp_noc_log noc_log;
+ u32 snapshot_index;
+ struct inst_snapshot snapshot[16];
+struct msm_cvp_core {
+ struct mutex clk_lock;
+ dev_t dev_num;
+ struct cdev cdev;
+ struct class *class;
+ struct cvp_hfi_ops *dev_ops;
+ struct msm_cvp_platform_data *platform_data;
+ struct msm_cvp_synx_ops *synx_ftbl;
+ struct list_head instances;
+ enum cvp_core_state state;
+ struct completion completions[SYS_MSG_END - SYS_MSG_START + 1];
+ enum msm_cvp_hfi_type hfi_type;
+ struct msm_cvp_platform_resources resources;
+ struct delayed_work fw_unload_work;
+ struct work_struct ssr_work;
+ enum hal_ssr_trigger_type ssr_type;
+ u32 smmu_fault_count;
+ u32 last_fault_addr;
+ u32 ssr_count;
+ u32 smem_leak_count;
+ bool trigger_ssr;
+ unsigned long orig_core_sum;
+ atomic64_t kernel_trans_id;
+ struct cvp_debug_log log;
+struct msm_cvp_inst {
+ struct list_head dsp_list;
+ struct mutex sync_lock, lock;
+ enum session_type session_type;
+ u32 dsp_handle;
+ struct task_struct *task;
+ atomic_t smem_count;
+ struct cvp_session_queue session_queue;
+ struct cvp_session_queue session_queue_fence;
+ struct cvp_session_event event_handler;
+ enum instance_state state;
+ struct msm_cvp_list freqs;
+ struct msm_cvp_list persistbufs;
+ struct cvp_dmamap_cache dma_cache;
+ struct msm_cvp_list cvpdspbufs;
+ struct msm_cvp_list cvpwnccbufs;
+ struct msm_cvp_list frames;
+ struct cvp_frame_bufs last_frame;
+ struct cvp_frame_bufs unused_dsp_bufs;
+ struct cvp_frame_bufs unused_wncc_bufs;
+ u32 cvpwnccbufs_num;
+ struct msm_cvp_wncc_buffer* cvpwnccbufs_table;
+ struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
+ struct msm_cvp_debug debug;
+ struct cvp_clock_data clk_data;
+ enum msm_cvp_modes flags;
+ struct kref kref;
+ struct cvp_session_prop prop;
+ /* error_code will be cleared after being returned to user mode */
+ u32 error_code;
+ /* prev_error_code saves value of error_code before it's cleared */
+ u32 prev_error_code;
+ struct synx_session *synx_session_id;
+ struct cvp_fence_queue fence_cmd_queue;
+ char proc_name[TASK_COMM_LEN];
+void cvp_handle_cmd_response(enum hal_command_response cmd, void *data);
+ enum hal_ssr_trigger_type type);
+void msm_cvp_comm_handle_thermal_event(void);
+void msm_cvp_ssr_handler(struct work_struct *work);
+ * XXX: normally should be in msm_cvp_core.h, but that's meant for public APIs,
+ * whereas this is private
+int msm_cvp_destroy(struct msm_cvp_inst *inst);
+void *cvp_get_drv_data(struct device *dev);
+void *cvp_kmem_cache_zalloc(struct cvp_kmem_cache *k, gfp_t flags);
+void cvp_kmem_cache_free(struct cvp_kmem_cache *k, void *obj);
@@ -0,0 +1,670 @@
+#include <linux/compat.h>
+static int _get_pkt_hdr_from_user(struct eva_kmd_arg __user *up,
+ struct cvp_hal_session_cmd_pkt *pkt_hdr)
+ struct eva_kmd_hfi_packet *u;
+ hdr = (struct cvp_hfi_msg_session_hdr *)pkt_hdr;
+ u = &up->data.hfi_pkt;
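+	/* The first two u32 words of an HFI packet appear to carry the
+	 * packet size (in bytes) and the packet type, respectively. */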
+ if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+ if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+ if (get_pkt_index(pkt_hdr) < 0) {
+ dprintk(CVP_ERR, "user mode provides incorrect hfi\n");
+ goto set_default_pkt_hdr;
+ if (pkt_hdr->size > MAX_HFI_PKT_SIZE*sizeof(unsigned int)) {
+ dprintk(CVP_ERR, "user HFI packet too large %x\n",
+ pkt_hdr->size);
+set_default_pkt_hdr:
+ pkt_hdr->size = get_msg_size(hdr);
+static int _get_fence_pkt_hdr_from_user(struct eva_kmd_arg __user *up,
+ struct eva_kmd_hfi_synx_packet __user *u;
+ u = &up->data.hfi_synx_pkt;
+ if (pkt_hdr->size > (MAX_HFI_PKT_SIZE*sizeof(unsigned int)))
+/* Size is in units of u32 */
+static int _copy_pkt_from_user(struct eva_kmd_arg *kp,
+ struct eva_kmd_arg __user *up,
+ unsigned int start, unsigned int size)
+ struct eva_kmd_hfi_packet *k, *u;
+ k = &kp->data.hfi_pkt;
+ for (i = start; i < start + size; i++)
+ if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+ if (get_user(k->oob_buf, &u->oob_buf))
+static int _copy_synx_data_from_user(
+ struct eva_kmd_hfi_synx_packet *k,
+ struct eva_kmd_hfi_synx_packet __user *u)
+ for (i = 0; i < MAX_FENCE_DATA_SIZE; i++) {
+ if (get_user(k->fence_data[i], &u->fence_data[i]))
+static int _copy_fence_data_from_user_deprecate(
+ struct eva_kmd_hfi_fence_packet *k,
+ struct eva_kmd_hfi_fence_packet __user *u)
+ for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+ if (get_user(k->frame_id, &u->frame_id)) {
+ dprintk(CVP_ERR, "Failed to get frame id from fence pkt\n");
+static int _copy_fence_pkt_from_user(struct eva_kmd_arg *kp,
+ struct eva_kmd_arg __user *up)
+{ struct eva_kmd_hfi_synx_packet *k;
+ struct eva_kmd_hfi_fence_packet __user *u1;
+ k = &kp->data.hfi_synx_pkt;
+ u1 = &up->data.hfi_fence_pkt;
+ for (i = 0; i < MAX_HFI_PKT_SIZE; i++)
+ if (get_user(k->fence_data[0], &u->fence_data[0]))
+ if (k->fence_data[0] == 0xFEEDFACE)
+ return _copy_synx_data_from_user(k, u);
+ return _copy_fence_data_from_user_deprecate(
+ (struct eva_kmd_hfi_fence_packet *)k, u1);
+static int _copy_frameid_from_user(struct eva_kmd_arg *kp,
+ if (get_user(kp->data.frame_id, &up->data.frame_id)) {
+ dprintk(CVP_ERR, "Failed to get frame id from user\n");
+static int _copy_sysprop_from_user(struct eva_kmd_arg *kp,
+ struct eva_kmd_sys_properties *k, *u;
+ k = &kp->data.sys_properties;
+ u = &up->data.sys_properties;
+ if (get_user(k->prop_num, &u->prop_num))
+ if (k->prop_num < 1 || k->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
+ dprintk(CVP_ERR, "Num of prop out of range %d\n", k->prop_num);
+ return _copy_pkt_from_user(kp, up, 1,
+ (k->prop_num * (sizeof(struct eva_kmd_sys_property) >> 2)));
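+	/* The size argument is in u32 words, hence the per-property struct
+	 * size is converted from bytes with >> 2. */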
+static int _copy_pkt_to_user(struct eva_kmd_arg *kp,
+ unsigned int size)
+ for (i = 0; i < size; i++)
+ if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+ if (put_user(k->oob_buf, &u->oob_buf))
+static int _copy_fence_pkt_to_user(struct eva_kmd_arg *kp,
+ struct eva_kmd_hfi_synx_packet *k;
+ for (i = 0; i < MAX_HFI_PKT_SIZE; i++) {
+static int _copy_sysprop_to_user(struct eva_kmd_arg *kp,
+ struct eva_kmd_sys_properties *k;
+ struct eva_kmd_sys_properties __user *u;
+ for (i = 0; i < 8; i++)
+ if (put_user(k->prop_data[i].data, &u->prop_data[i].data))
+static void print_hfi_short(struct eva_kmd_arg __user *up)
+ struct eva_kmd_hfi_packet *pkt;
+ unsigned int words[5];
+ pkt = &up->data.hfi_pkt;
+ if (get_user(words[0], &up->type) ||
+ get_user(words[1], &up->buf_offset) ||
+ get_user(words[2], &up->buf_num) ||
+ get_user(words[3], &pkt->pkt_data[0]) ||
+ get_user(words[4], &pkt->pkt_data[1]))
+ dprintk(CVP_ERR, "Failed to print ioctl cmd\n");
+ dprintk(CVP_HFI, "IOCTL cmd type %#x, offset %d, num %d, pkt %d %#x\n",
+ words[0], words[1], words[2], words[3], words[4]);
+static int _copy_session_ctrl_to_user(
+ struct eva_kmd_session_control *k,
+ struct eva_kmd_session_control *u)
+ if (put_user(k->ctrl_type, &u->ctrl_type))
+ if (put_user(k->ctrl_data[i], &u->ctrl_data[i]))
+static int _get_session_ctrl_from_user(
+ if (get_user(k->ctrl_type, &u->ctrl_type))
+ if (get_user(k->ctrl_data[i], &u->ctrl_data[i]))
+static int _get_session_info_from_user(
+ struct eva_kmd_session_info *k,
+ struct eva_kmd_session_info __user *u)
+ if (get_user(k->session_id, &u->session_id))
+ for (i = 0; i < 10; i++)
+ if (get_user(k->reserved[i], &u->reserved[i]))
+static int convert_from_user(struct eva_kmd_arg *kp,
+ unsigned long arg,
+ struct eva_kmd_arg __user *up = (struct eva_kmd_arg *)arg;
+ struct cvp_hal_session_cmd_pkt pkt_hdr;
+ int pkt_idx;
+ if (!kp || !up) {
+ print_hfi_short(up);
+ if (get_user(kp->type, &up->type))
+ if (get_user(kp->buf_offset, &up->buf_offset) ||
+ get_user(kp->buf_num, &up->buf_num))
+ switch (kp->type) {
+ struct eva_kmd_session_info *k;
+ struct eva_kmd_session_info __user *u;
+ k = &kp->data.session;
+ u = &up->data.session;
+ if (_get_session_info_from_user(k, u)) {
+ dprintk(CVP_ERR, "fail to get sess info\n");
+ struct eva_kmd_buffer *k, *u;
+ k = &kp->data.regbuf;
+ u = &up->data.regbuf;
+ if (get_user(k->type, &u->type) ||
+ get_user(k->index, &u->index) ||
+ get_user(k->fd, &u->fd) ||
+ get_user(k->size, &u->size) ||
+ get_user(k->offset, &u->offset) ||
+ get_user(k->pixelformat, &u->pixelformat) ||
+ get_user(k->flags, &u->flags))
+ for (i = 0; i < 5; i++)
+ k = &kp->data.unregbuf;
+ u = &up->data.unregbuf;
+ if (_get_pkt_hdr_from_user(up, &pkt_hdr)) {
+ dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+ kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+ rc = _copy_pkt_from_user(kp, up, 0, (pkt_hdr.size >> 2));
+ if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr)) {
+ dprintk(CVP_HFI, "system call cmd pkt: %d 0x%x\n",
+ pkt_hdr.size, pkt_hdr.packet_type);
+ pkt_idx = get_pkt_index(&pkt_hdr);
+ dprintk(CVP_ERR, "%s incorrect packet %d, %x\n",
+ pkt_hdr.size,
+ pkt_hdr.packet_type);
+ rc = _copy_fence_pkt_from_user(kp, up);
+ struct eva_kmd_session_control *k, *u;
+ k = &kp->data.session_ctrl;
+ u = &up->data.session_ctrl;
+ rc = _get_session_ctrl_from_user(k, u);
+ if (_copy_sysprop_from_user(kp, up)) {
+ dprintk(CVP_ERR, "Failed to get sysprop from user\n");
+ dprintk(CVP_ERR, "Failed to set sysprop from user\n");
+ if (_copy_frameid_from_user(kp, up))
+ dprintk_rl(CVP_ERR, "%s: unknown cmd type 0x%x\n",
+ __func__, kp->type);
+static int _put_user_session_info(
+ if (put_user(k->session_id, &u->session_id))
+ if (put_user(k->reserved[i], &u->reserved[i]))
+static int convert_to_user(struct eva_kmd_arg *kp, unsigned long arg)
+ int i, size;
+ if (put_user(kp->type, &up->type))
+ hdr = (struct cvp_hfi_msg_session_hdr *)k;
+ size = get_msg_size(hdr) >> 2;
+ if (_put_user_session_info(k, u)) {
+ dprintk(CVP_ERR, "fail to copy sess info to user\n");
+ if (put_user(k->type, &u->type) ||
+ put_user(k->index, &u->index) ||
+ put_user(k->fd, &u->fd) ||
+ put_user(k->size, &u->size) ||
+ put_user(k->offset, &u->offset) ||
+ put_user(k->pixelformat, &u->pixelformat) ||
+ put_user(k->flags, &u->flags))
+ if (_get_pkt_hdr_from_user(up, &pkt_hdr))
+ dprintk(CVP_HFI, "Send user cmd pkt: %d %d\n",
+ rc = _copy_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
+ if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr))
+ rc = _copy_fence_pkt_to_user(kp, up);
+ rc = _copy_session_ctrl_to_user(k, u);
+ if (_copy_sysprop_to_user(kp, up)) {
+ dprintk(CVP_ERR, "Fail to copy sysprop to user\n");
+ dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
+static long cvp_ioctl(struct msm_cvp_inst *inst,
+ unsigned int cmd, unsigned long arg)
+ struct eva_kmd_arg *karg;
+ karg = kzalloc(sizeof(*karg), GFP_KERNEL);
+ if (!karg)
+ if (convert_from_user(karg, arg, inst)) {
+ dprintk_rl(CVP_ERR, "%s: failed to get from user cmd %x\n",
+ __func__, karg->type);
+ kfree(karg);
+ rc = msm_cvp_private((void *)inst, cmd, karg);
+ dprintk(CVP_ERR, "%s: failed cmd type %x %d\n",
+ __func__, karg->type, rc);
+ if (convert_to_user(karg, arg)) {
+ dprintk(CVP_ERR, "%s: failed to copy to user cmd %x\n",
+ if (!filp || !filp->private_data) {
+ inst = filp->private_data;
+ return cvp_ioctl(inst, cmd, arg);
+ return cvp_ioctl(inst, cmd, (unsigned long)compat_ptr(arg));
@@ -0,0 +1,1042 @@
+#include <soc/qcom/of_common.h>
+#define UBWC_CONFIG(mco, mlo, hbo, bslo, bso, rs, mc, ml, hbb, bsl, bsp) \
+{ \
+ .override_bit_info.max_channel_override = mco, \
+ .override_bit_info.mal_length_override = mlo, \
+ .override_bit_info.hb_override = hbo, \
+ .override_bit_info.bank_swzl_level_override = bslo, \
+ .override_bit_info.bank_spreading_override = bso, \
+ .override_bit_info.reserved = rs, \
+ .max_channels = mc, \
+ .mal_length = ml, \
+ .highest_bank_bit = hbb, \
+ .bank_swzl_level = bsl, \
+ .bank_spreading = bsp, \
+static struct msm_cvp_common_data default_common_data[] = {
+ .key = "qcom,auto-pil",
+ .value = 1,
+static struct msm_cvp_common_data sm8450_common_data[] = {
+ .key = "qcom,pm-qos-latency-us",
+ .value = 50,
+ .key = "qcom,sw-power-collapse",
+ .key = "qcom,domain-attr-non-fatal-faults",
+ .key = "qcom,max-secure-instances",
+		.value = 2, /*
+			     * As per design, the driver allows a 3rd
+			     * instance as well, since the secure
+			     * flags are updated later for the
+			     * current instance. Hence the total number
+			     * of secure sessions would be
+			     * max-secure-instances + 1.
+ .key = "qcom,max-ssr-allowed",
+ .value = 1, /*
+			     * Maximum number of SSRs before BUG_ON
+ .key = "qcom,power-collapse-delay",
+ .value = 3000,
+ .key = "qcom,hw-resp-timeout",
+ .value = 2000,
+ .key = "qcom,dsp-resp-timeout",
+ .value = 1000,
+ .key = "qcom,debug-timeout",
+ .value = 0,
+ .key = "qcom,dsp-enabled",
+static struct msm_cvp_common_data sm8550_common_data[] = {
+static struct msm_cvp_common_data sm8550_tvm_common_data[] = {
+static struct msm_cvp_common_data sm8650_common_data[] = {
+ .key = "qcom,qos_noc_rge_niu_offset",
+ .value = 0x0,
+ .key = "qcom,qos_noc_gce_vadl_tof_niu_offset",
+ .key = "qcom,qos_noc_cdm_niu_offset",
+ .key = "qcom,noc_core_err_offset",
+ .value = 0xA000,
+ .key = "qcom,noc_main_sidebandmanager_offset",
+ .value = 0x6E00,
+static struct msm_cvp_common_data sm7650_common_data[] = {
+ .value = 2,
+ .value = 0x200,
+ .value = 0xE00,
+ .value = 0x1A00,
+/* Default UBWC config for LPDDR5 */
+static struct msm_cvp_ubwc_config_data kona_ubwc_data[] = {
+ UBWC_CONFIG(1, 1, 1, 0, 0, 0, 8, 32, 16, 0, 0),
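+	/* Per the UBWC_CONFIG macro above: max_channel/mal_length/hb overrides
+	 * enabled; max_channels = 8, mal_length = 32, highest_bank_bit = 16,
+	 * bank_swzl_level = 0, bank_spreading = 0. */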
+static struct msm_cvp_qos_setting waipio_noc_qos = {
+ .axi_qos = 0x99,
+ .prioritylut_low = 0x22222222,
+ .prioritylut_high = 0x33333333,
+ .urgency_low = 0x1022,
+ .dangerlut_low = 0x0,
+ .safelut_low = 0xffff,
+static struct msm_cvp_qos_setting lanai_noc_qos = {
+ .prioritylut_low = 0x33333333,
+ .urgency_low = 0x1033,
+ .urgency_low_ro = 0x1003,
+static struct msm_cvp_qos_setting palawan_noc_qos = {
+ .urgency_low = 0x1003,
+static struct msm_cvp_platform_data default_data = {
+ .common_data = default_common_data,
+ .common_data_length = ARRAY_SIZE(default_common_data),
+ .sku_version = 0,
+ .vpu_ver = VPU_VERSION_5,
+ .ubwc_config = 0x0,
+ .noc_qos = 0x0,
+ .vm_id = 1,
+static struct msm_cvp_platform_data sm8450_data = {
+ .common_data = sm8450_common_data,
+ .common_data_length = ARRAY_SIZE(sm8450_common_data),
+ .ubwc_config = kona_ubwc_data,
+ .noc_qos = &waipio_noc_qos,
+static struct msm_cvp_platform_data sm8550_data = {
+ .common_data = sm8550_common_data,
+ .common_data_length = ARRAY_SIZE(sm8550_common_data),
+	.ubwc_config = kona_ubwc_data, /* Reuse Kona setting */
+	.noc_qos = &waipio_noc_qos, /* Reuse Waipio setting */
+static struct msm_cvp_platform_data sm8550_tvm_data = {
+ .common_data = sm8550_tvm_common_data,
+ .common_data_length = ARRAY_SIZE(sm8550_tvm_common_data),
+ .vm_id = 2,
+static struct msm_cvp_platform_data sm8650_data = {
+ .common_data = sm8650_common_data,
+ .common_data_length = ARRAY_SIZE(sm8650_common_data),
+ .noc_qos = &lanai_noc_qos,
+static struct msm_cvp_platform_data sm7650_data = {
+ .common_data = sm7650_common_data,
+ .common_data_length = ARRAY_SIZE(sm7650_common_data),
+ .noc_qos = &palawan_noc_qos,
+static const struct of_device_id msm_cvp_dt_match[] = {
+ .compatible = "qcom,waipio-cvp",
+ .data = &sm8450_data,
+ .compatible = "qcom,kalama-cvp",
+ .data = &sm8550_data,
+ .compatible = "qcom,kalama-cvp-tvm",
+ .data = &sm8550_tvm_data,
+ .compatible = "qcom,pineapple-cvp",
+ .data = &sm8650_data,
+ .compatible = "qcom,cliffs-cvp",
+ .data = &sm7650_data,
+ {},
+ * WARN: name field CAN NOT hold more than 23 chars
+ * excluding the ending '\0'
+ * NOTE: the def entry index for the command packet is
+ * "the packet type - HFI_CMD_SESSION_CVP_START"
+const struct msm_cvp_hfi_defs cvp_hfi_defs[MAX_PKT_IDX] = {
+ [HFI_CMD_SESSION_CVP_DFS_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DFS_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DFS_CONFIG,
+ .is_config_pkt = true,
+ .resp = HAL_NO_RESP,
+ .name = "DFS",
+ [HFI_CMD_SESSION_CVP_DFS_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DFS_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DFS_FRAME,
+ .is_config_pkt = false,
+ .name = "DFS_FRAME",
+ .force_kernel_fence = false,
+ [HFI_CMD_SESSION_CVP_SGM_OF_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = 0xFFFFFFFF,
+ .type = HFI_CMD_SESSION_CVP_SGM_OF_CONFIG,
+ .name = "SGM_OF",
+ [HFI_CMD_SESSION_CVP_SGM_OF_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_SGM_OF_FRAME,
+ .name = "SGM_OF_FRAME",
+ [HFI_CMD_SESSION_CVP_WARP_NCC_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_WARP_NCC_CONFIG,
+ .name = "WARP_NCC",
+ [HFI_CMD_SESSION_CVP_WARP_NCC_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_WARP_NCC_FRAME,
+ .name = "WARP_NCC_FRAME",
+ [HFI_CMD_SESSION_CVP_WARP_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_WARP_CONFIG,
+ .name = "WARP",
+ [HFI_CMD_SESSION_CVP_WARP_DS_PARAMS - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_WARP_DS_PARAMS,
+ .name = "WARP_DS_PARAMS",
+ [HFI_CMD_SESSION_CVP_WARP_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_WARP_FRAME,
+ .name = "WARP_FRAME",
+ .force_kernel_fence = true,
+ [HFI_CMD_SESSION_CVP_DMM_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DMM_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DMM_CONFIG,
+ .name = "DMM",
+ [HFI_CMD_SESSION_CVP_DMM_PARAMS - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_DMM_PARAMS,
+ .name = "DMM_PARAMS",
+ [HFI_CMD_SESSION_CVP_DMM_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DMM_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DMM_FRAME,
+ .name = "DMM_FRAME",
+ [HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_PERSIST_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS,
+ .name = "SET_PERSIST",
+ [HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+ .size = 0xffffffff,
+ .type = HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS,
+ .name = "REL_PERSIST",
+ [HFI_CMD_SESSION_CVP_DS_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DS_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DS_CONFIG,
+ .name = "DS_CONFIG",
+ [HFI_CMD_SESSION_CVP_DS - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DS_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DS,
+ .name = "DS",
+ [HFI_CMD_SESSION_CVP_CV_TME_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_OF_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_CV_TME_CONFIG,
+ .name = "TME",
+ [HFI_CMD_SESSION_CVP_CV_TME_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_OF_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_CV_TME_FRAME,
+ .name = "TME_FRAME",
+ [HFI_CMD_SESSION_CVP_CV_ODT_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_ODT_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_CV_ODT_CONFIG,
+ .name = "ODT",
+ [HFI_CMD_SESSION_CVP_CV_ODT_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_ODT_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_CV_ODT_FRAME,
+ .name = "ODT_FRAME",
+ [HFI_CMD_SESSION_CVP_CV_OD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_OD_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_CV_OD_CONFIG,
+ .name = "OD",
+ [HFI_CMD_SESSION_CVP_CV_OD_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_OD_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_CV_OD_FRAME,
+ .name = "OD_FRAME",
+ [HFI_CMD_SESSION_CVP_NCC_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_NCC_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_NCC_CONFIG,
+ .name = "NCC",
+ [HFI_CMD_SESSION_CVP_NCC_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_NCC_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_NCC_FRAME,
+ .name = "NCC_FRAME",
+ [HFI_CMD_SESSION_CVP_ICA_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_ICA_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_ICA_CONFIG,
+ .name = "ICA",
+ [HFI_CMD_SESSION_CVP_ICA_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_ICA_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_ICA_FRAME,
+ .name = "ICA_FRAME",
+ [HFI_CMD_SESSION_CVP_HCD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_HCD_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_HCD_CONFIG,
+ .name = "HCD",
+ [HFI_CMD_SESSION_CVP_HCD_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_HCD_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_HCD_FRAME,
+ .name = "HCD_FRAME",
+ [HFI_CMD_SESSION_CVP_DC_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DCM_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DC_CONFIG,
+ .name = "DC",
+ [HFI_CMD_SESSION_CVP_DC_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_DCM_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DC_FRAME,
+ .name = "DC_FRAME",
+ [HFI_CMD_SESSION_CVP_DCM_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_DCM_CONFIG,
+ .name = "DCM",
+ [HFI_CMD_SESSION_CVP_DCM_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_DCM_FRAME,
+ .name = "DCM_FRAME",
+ [HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_PYS_HCD_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_PYS_HCD_CONFIG,
+ .name = "PYS_HCD",
+ [HFI_CMD_SESSION_CVP_PYS_HCD_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .size = HFI_PYS_HCD_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_PYS_HCD_FRAME,
+ .name = "PYS_HCD_FRAME",
+ [HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS,
+ .name = "SET_MODEL",
+ [HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_SET_SNAPSHOT_BUFFERS,
+ .name = "SET_SNAPSHOT",
+ [HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_RELEASE_SNAPSHOT_BUFFERS,
+ .name = "REL_SNAPSHOT",
+ [HFI_CMD_SESSION_CVP_SET_SNAPSHOT_MODE - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_SET_SNAPSHOT_MODE,
+ .name = "SNAPSHOT_MODE",
+ [HFI_CMD_SESSION_CVP_SNAPSHOT_WRITE_DONE - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_SNAPSHOT_WRITE_DONE,
+ .name = "SNAPSHOT_DONE",
+ [HFI_CMD_SESSION_CVP_FD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_FD_CONFIG,
+ .name = "FD",
+ [HFI_CMD_SESSION_CVP_FD_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_FD_FRAME,
+ .name = "FD_FRAME",
+ [HFI_CMD_SESSION_CVP_XRA_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_FRAME,
+ .name = "XRA_FRAME",
+ [HFI_CMD_SESSION_CVP_XRA_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_CONFIG,
+ .name = "XRA_CONFIG",
+ [HFI_CMD_SESSION_CVP_XRA_BLOB_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_BLOB_FRAME,
+ .name = "XRA_BLOB_FRAME",
+ [HFI_CMD_SESSION_CVP_XRA_BLOB_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_BLOB_CONFIG,
+ .name = "XRA_BLOB_CONFIG",
+ [HFI_CMD_SESSION_CVP_XRA_PATCH_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_PATCH_FRAME,
+ .name = "XRA_PATCH_FRAME",
+ [HFI_CMD_SESSION_CVP_XRA_PATCH_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_PATCH_CONFIG,
+ .name = "XRA_PATCH_CONFIG",
+ [HFI_CMD_SESSION_CVP_XRA_MATCH_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_MATCH_FRAME,
+ .name = "XRA_MATCH_FRAME",
+ [HFI_CMD_SESSION_CVP_XRA_MATCH_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_XRA_MATCH_CONFIG,
+ .name = "XRA_MATCH_CONFIG",
+ [HFI_CMD_SESSION_CVP_RGE_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_RGE_FRAME,
+ .name = "RGE_FRAME",
+ [HFI_CMD_SESSION_CVP_RGE_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_RGE_CONFIG,
+ .name = "RGE_CONFIG",
+ [HFI_CMD_SESSION_EVA_ITOF_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_ITOF_FRAME,
+ .name = "ITOF_FRAME",
+ [HFI_CMD_SESSION_EVA_ITOF_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_ITOF_CONFIG,
+ .name = "ITOF_CONFIG",
+ [HFI_CMD_SESSION_EVA_DLFD_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_DLFD_FRAME,
+ .name = "DLFD_FRAME",
+ [HFI_CMD_SESSION_EVA_DLFD_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_DLFD_CONFIG,
+ .name = "DLFD_CONFIG",
+ [HFI_CMD_SESSION_EVA_DLFL_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_DLFL_FRAME,
+ .name = "DLFL_FRAME",
+ [HFI_CMD_SESSION_EVA_DLFL_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_DLFL_CONFIG,
+ .name = "DLFL_CONFIG",
+ [HFI_CMD_SESSION_CVP_SYNX - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_CVP_SYNX,
+ .name = "SYNX_TEST",
+ [HFI_CMD_SESSION_EVA_DME_ONLY_CONFIG - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_DME_ONLY_CONFIG,
+ .name = "DME_CONFIG",
+ [HFI_CMD_SESSION_EVA_DME_ONLY_FRAME - HFI_CMD_SESSION_CVP_START] =
+ .type = HFI_CMD_SESSION_EVA_DME_ONLY_FRAME,
+ .name = "DME_FRAME",
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr)
+ if (!hdr || (hdr->packet_type < HFI_CMD_SESSION_CVP_START)
+ || hdr->packet_type >= (HFI_CMD_SESSION_CVP_START + MAX_PKT_IDX))
+ if (cvp_hfi_defs[hdr->packet_type - HFI_CMD_SESSION_CVP_START].size)
+ return (hdr->packet_type - HFI_CMD_SESSION_CVP_START);
+int get_pkt_fenceoverride(struct cvp_hal_session_cmd_pkt* hdr)
+ return cvp_hfi_defs[hdr->packet_type - HFI_CMD_SESSION_CVP_START].force_kernel_fence;
+int get_pkt_index_from_type(u32 pkt_type)
+ if ((pkt_type < HFI_CMD_SESSION_CVP_START) ||
+ pkt_type >= (HFI_CMD_SESSION_CVP_START + MAX_PKT_IDX))
+ if (cvp_hfi_defs[pkt_type - HFI_CMD_SESSION_CVP_START].size)
+ return (pkt_type - HFI_CMD_SESSION_CVP_START);
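+/*
+ * Illustrative usage of the lookup above (a sketch, not part of the
+ * driver; "pkt" and the negative-on-failure assumption are hypothetical):
+ *
+ *	int idx = get_pkt_index_from_type(pkt->packet_type);
+ *
+ *	if (idx >= 0)
+ *		expected_size = cvp_hfi_defs[idx].size;
+ */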
+MODULE_DEVICE_TABLE(of, msm_cvp_dt_match);
+int cvp_of_fdt_get_ddrtype(void)
+#ifdef FIXED_DDR_TYPE
+ /* of_fdt_get_ddrtype() is usually unavailable during pre-sil */
+ return DDR_TYPE_LPDDR5;
+ return of_fdt_get_ddrtype();
+void *cvp_get_drv_data(struct device *dev)
+ struct msm_cvp_platform_data *driver_data;
+ const struct of_device_id *match;
+ uint32_t ddr_type = DDR_TYPE_LPDDR5;
+ driver_data = &default_data;
+ if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
+ match = of_match_node(msm_cvp_dt_match, dev->of_node);
+ driver_data = (struct msm_cvp_platform_data *)match->data;
+ if (!strcmp(match->compatible, "qcom,waipio-cvp")) {
+ ddr_type = cvp_of_fdt_get_ddrtype();
+ if (ddr_type == -ENOENT) {
+ "Failed to get ddr type, use LPDDR5\n");
+ if (driver_data->ubwc_config &&
+ (ddr_type == DDR_TYPE_LPDDR4 ||
+ ddr_type == DDR_TYPE_LPDDR4X))
+ driver_data->ubwc_config->highest_bank_bit = 15;
+ dprintk(CVP_CORE, "DDR Type 0x%x hbb 0x%x\n",
+ ddr_type, driver_data->ubwc_config ?
+ driver_data->ubwc_config->highest_bank_bit : -1);
+ return driver_data;
@@ -0,0 +1,1265 @@
+#include <linux/sort.h>
+#include <linux/of_reserved_mem.h>
+#include "soc/qcom/secure_buffer.h"
+enum clock_properties {
+ CLOCK_PROP_HAS_SCALING = 1 << 0,
+ CLOCK_PROP_HAS_MEM_RETENTION = 1 << 1,
+#define PERF_GOV "performance"
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+static size_t get_u32_array_num_elements(struct device_node *np,
+ char *name)
+ int len;
+ size_t num_elements = 0;
+ if (!of_get_property(np, name, &len)) {
+ dprintk(CVP_ERR, "Failed to read %s from device tree\n",
+ name);
+ goto fail_read;
+ num_elements = len / sizeof(u32);
+ if (num_elements <= 0) {
+ dprintk(CVP_ERR, "%s not specified in device tree\n",
+ return num_elements;
+fail_read:
+static inline void msm_cvp_free_allowed_clocks_table(
+ res->allowed_clks_tbl = NULL;
+static inline void msm_cvp_free_cycles_per_mb_table(
+ res->clock_freq_tbl.clk_prof_entries = NULL;
+static inline void msm_cvp_free_reg_table(
+ res->reg_set.reg_tbl = NULL;
+static inline void msm_cvp_free_qdss_addr_table(
+ res->qdss_addr_set.addr_tbl = NULL;
+static inline void msm_cvp_free_bus_vectors(
+ kfree(res->bus_set.bus_tbl);
+ res->bus_set.bus_tbl = NULL;
+ res->bus_set.count = 0;
+static inline void msm_cvp_free_regulator_table(
+ int c = 0;
+ for (c = 0; c < res->regulator_set.count; ++c) {
+ struct regulator_info *rinfo =
+ &res->regulator_set.regulator_tbl[c];
+ rinfo->name = NULL;
+ res->regulator_set.regulator_tbl = NULL;
+ res->regulator_set.count = 0;
+static inline void msm_cvp_free_clock_table(
+ res->clock_set.clock_tbl = NULL;
+ res->clock_set.count = 0;
+void msm_cvp_free_platform_resources(
+ msm_cvp_free_clock_table(res);
+ msm_cvp_free_regulator_table(res);
+ msm_cvp_free_allowed_clocks_table(res);
+ msm_cvp_free_reg_table(res);
+ msm_cvp_free_qdss_addr_table(res);
+ msm_cvp_free_bus_vectors(res);
+static int msm_cvp_load_ipcc_regs(struct msm_cvp_platform_resources *res)
+ unsigned int reg_config[2];
+ struct platform_device *pdev = res->pdev;
+ ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,ipcc-reg",
+ reg_config, 2);
+ dprintk(CVP_ERR, "Failed to read ipcc reg: %d\n", ret);
+ res->ipcc_reg_base = reg_config[0];
+ res->ipcc_reg_size = reg_config[1];
+ "ipcc reg_base = %x, reg_size = %x\n",
+ res->ipcc_reg_base,
+ res->ipcc_reg_size
+ );
+static int msm_cvp_load_regspace_mapping(struct msm_cvp_platform_resources *res)
+ unsigned int ipclite_mapping_config[3] = {0};
+ unsigned int hwmutex_mapping_config[3] = {0};
+ unsigned int aon_mapping_config[3] = {0};
+ unsigned int timer_config[3] = {0};
+ ret = of_property_read_u32_array(pdev->dev.of_node, "ipclite_mappings",
+ ipclite_mapping_config, 3);
+ dprintk(CVP_ERR, "Failed to read ipclite reg: %d\n", ret);
+ res->reg_mappings.ipclite_iova = ipclite_mapping_config[0];
+ res->reg_mappings.ipclite_size = ipclite_mapping_config[1];
+ res->reg_mappings.ipclite_phyaddr = ipclite_mapping_config[2];
+ ret = of_property_read_u32_array(pdev->dev.of_node, "hwmutex_mappings",
+ hwmutex_mapping_config, 3);
+ dprintk(CVP_ERR, "Failed to read hwmutex reg: %d\n", ret);
+ res->reg_mappings.hwmutex_iova = hwmutex_mapping_config[0];
+ res->reg_mappings.hwmutex_size = hwmutex_mapping_config[1];
+ res->reg_mappings.hwmutex_phyaddr = hwmutex_mapping_config[2];
+ ret = of_property_read_u32_array(pdev->dev.of_node, "aon_mappings",
+ aon_mapping_config, 3);
+ dprintk(CVP_ERR, "Failed to read aon reg: %d\n", ret);
+ res->reg_mappings.aon_iova = aon_mapping_config[0];
+ res->reg_mappings.aon_size = aon_mapping_config[1];
+ res->reg_mappings.aon_phyaddr = aon_mapping_config[2];
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "aon_timer_mappings", timer_config, 3);
+ dprintk(CVP_ERR, "Failed to read timer reg: %d\n", ret);
+ res->reg_mappings.timer_iova = timer_config[0];
+ res->reg_mappings.timer_size = timer_config[1];
+ res->reg_mappings.timer_phyaddr = timer_config[2];
+ "reg mappings %#x %#x %#x %#x %#x %#X %#x %#x %#x %#x %#x %#x\n",
+ res->reg_mappings.ipclite_iova, res->reg_mappings.ipclite_size,
+ res->reg_mappings.ipclite_phyaddr, res->reg_mappings.hwmutex_iova,
+ res->reg_mappings.hwmutex_size, res->reg_mappings.hwmutex_phyaddr,
+ res->reg_mappings.aon_iova, res->reg_mappings.aon_size,
+ res->reg_mappings.aon_phyaddr, res->reg_mappings.timer_iova,
+ res->reg_mappings.timer_size, res->reg_mappings.timer_phyaddr);
+static int msm_cvp_load_gcc_regs(struct msm_cvp_platform_resources *res)
+ ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,gcc-reg",
+ dprintk(CVP_WARN, "No gcc reg configured: %d\n", ret);
+ res->gcc_reg_base = reg_config[0];
+ res->gcc_reg_size = reg_config[1];
+static int msm_cvp_load_reg_table(struct msm_cvp_platform_resources *res)
+ if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
+ * qcom,reg-presets is an optional property. It likely won't be
+ * present if we don't have any register settings to program
+ dprintk(CVP_CORE, "qcom,reg-presets not found\n");
+ reg_set = &res->reg_set;
+ reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+ "qcom,reg-presets");
+ reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
+ if (!reg_set->count) {
+ dprintk(CVP_CORE, "no elements in reg set\n");
+ reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
+ sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
+ if (!reg_set->reg_tbl) {
+ dprintk(CVP_ERR, "%s Failed to alloc register table\n",
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
+ (u32 *)reg_set->reg_tbl, reg_set->count * 2)) {
+ dprintk(CVP_ERR, "Failed to read register table\n");
+ "reg = %x, value = %x\n",
+ reg_set->reg_tbl[i].value
+static int msm_cvp_load_qdss_table(struct msm_cvp_platform_resources *res)
+ struct addr_set *qdss_addr_set;
+ if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
+ * qcom,qdss-presets is an optional property. It likely won't be
+ dprintk(CVP_CORE, "qcom,qdss-presets not found\n");
+ qdss_addr_set = &res->qdss_addr_set;
+ qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
+ "qcom,qdss-presets");
+ qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
+ if (!qdss_addr_set->count) {
+ dprintk(CVP_CORE, "no elements in qdss reg set\n");
+ qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
+ qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
+ if (!qdss_addr_set->addr_tbl) {
+ goto err_qdss_addr_tbl;
+ rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
+ (u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
+ dprintk(CVP_ERR, "Failed to read qdss address table\n");
+ for (i = 0; i < qdss_addr_set->count; i++) {
+ dprintk(CVP_CORE, "qdss addr = %x, value = %x\n",
+ qdss_addr_set->addr_tbl[i].start,
+ qdss_addr_set->addr_tbl[i].size);
+err_qdss_addr_tbl:
+static int msm_cvp_load_fw_name(struct msm_cvp_platform_resources *res)
+ return of_property_read_string_index(pdev->dev.of_node,
+ "cvp,firmware-name", 0, &res->fw_name);
+static int msm_cvp_load_subcache_info(struct msm_cvp_platform_resources *res)
+ int rc = 0, num_subcaches = 0, c;
+ struct subcache_set *subcaches = &res->subcache_set;
+ num_subcaches = of_property_count_strings(pdev->dev.of_node,
+ "cache-slice-names");
+ if (num_subcaches <= 0) {
+ dprintk(CVP_CORE, "No subcaches found\n");
+ goto err_load_subcache_table_fail;
+ subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
+ sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
+ if (!subcaches->subcache_tbl) {
+ "Failed to allocate memory for subcache tbl\n");
+ subcaches->count = num_subcaches;
+ dprintk(CVP_CORE, "Found %d subcaches\n", num_subcaches);
+ for (c = 0; c < num_subcaches; ++c) {
+ struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c];
+ of_property_read_string_index(pdev->dev.of_node,
+ "cache-slice-names", c, &vsc->name);
+ res->sys_cache_present = true;
+err_load_subcache_table_fail:
+ res->sys_cache_present = false;
+ subcaches->count = 0;
+ subcaches->subcache_tbl = NULL;
+ * msm_cvp_load_u32_table() - load dtsi table entries
+ * @pdev: A pointer to the platform device.
+ * @of_node: A pointer to the device node.
+ * @table_name: A pointer to the dtsi table entry name.
+ * @struct_size: The size of a single entry in the dtsi table.
+ * @table: A pointer to the table pointer, to be filled with the
+ * dtsi table entries.
+ * @num_elements: A pointer to be filled with the number of elements
+ * in the table.
+ * This is a generic implementation to load single or multiple array
+ * table from dtsi. The array elements should be of size equal to u32.
+ * Return: Return '0' for success else appropriate error value.
+int msm_cvp_load_u32_table(struct platform_device *pdev,
+ struct device_node *of_node, char *table_name, int struct_size,
+ u32 **table, u32 *num_elements)
+ int rc = 0, num_elemts = 0;
+ u32 *ptbl = NULL;
+ if (!of_find_property(of_node, table_name, NULL)) {
+ dprintk(CVP_CORE, "%s not found\n", table_name);
+ num_elemts = get_u32_array_num_elements(of_node, table_name);
+ if (!num_elemts) {
+ dprintk(CVP_ERR, "no elements in %s\n", table_name);
+ num_elemts /= struct_size / sizeof(u32);
+ ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
+ if (!ptbl) {
+ dprintk(CVP_ERR, "Failed to alloc table %s\n", table_name);
+ if (of_property_read_u32_array(of_node, table_name, ptbl,
+ num_elemts * struct_size / sizeof(u32))) {
+ dprintk(CVP_ERR, "Failed to read %s\n", table_name);
+ *table = ptbl;
+ if (num_elements)
+ *num_elements = num_elemts;
+EXPORT_SYMBOL(msm_cvp_load_u32_table);
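+/*
+ * Minimal usage sketch for msm_cvp_load_u32_table() (hedged example, not
+ * part of the driver): load a two-u32-per-entry property such as
+ * "qcom,reg-presets" into reg_value_pair entries.
+ *
+ *	u32 *tbl = NULL;
+ *	u32 cnt = 0;
+ *	int rc;
+ *
+ *	rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
+ *			"qcom,reg-presets", sizeof(struct reg_value_pair),
+ *			&tbl, &cnt);
+ */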
+/* A comparator to compare loads (needed later on) */
+static int cmp(const void *a, const void *b)
+ return ((struct allowed_clock_rates_table *)a)->clock_rate -
+ ((struct allowed_clock_rates_table *)b)->clock_rate;
+static int msm_cvp_load_allowed_clocks_table(
+ if (!of_find_property(pdev->dev.of_node,
+ "qcom,allowed-clock-rates", NULL)) {
+ dprintk(CVP_CORE, "qcom,allowed-clock-rates not found\n");
+ rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
+ "qcom,allowed-clock-rates",
+ sizeof(*res->allowed_clks_tbl),
+ (u32 **)&res->allowed_clks_tbl,
+ &res->allowed_clks_tbl_size);
+ "%s: failed to read allowed clocks table\n", __func__);
+ sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
+ sizeof(*res->allowed_clks_tbl), cmp, NULL);
+static int msm_cvp_populate_mem_cdsp(struct device *dev,
+ struct device_node *mem_node;
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (mem_node) {
+ ret = of_reserved_mem_device_init_by_idx(dev,
+ dev->of_node, 0);
+ of_node_put(dev->of_node);
+ "Failed to initialize reserved mem, ret %d\n",
+ ret);
+ res->mem_cdsp.dev = dev;
+static int msm_cvp_populate_bus(struct device *dev,
+ struct bus_set *buses = &res->bus_set;
+ const char *temp_name = NULL;
+ struct bus_info *bus = NULL, *temp_table;
+ u32 range[2];
+ temp_table = krealloc(buses->bus_tbl, sizeof(*temp_table) *
+ (buses->count + 1), GFP_KERNEL);
+ if (!temp_table) {
+ dprintk(CVP_ERR, "%s: Failed to allocate memory", __func__);
+ goto err_bus;
+ buses->bus_tbl = temp_table;
+ bus = &buses->bus_tbl[buses->count];
+ memset(bus, 0x0, sizeof(struct bus_info));
+ rc = of_property_read_string(dev->of_node, "label", &temp_name);
+ dprintk(CVP_ERR, "'label' not found in node\n");
+ /* need a non-const version of name, hence copying it over */
+ bus->name = devm_kstrdup(dev, temp_name, GFP_KERNEL);
+ if (!bus->name) {
+ rc = of_property_read_u32(dev->of_node, "qcom,bus-master",
+ &bus->master);
+ dprintk(CVP_ERR, "'qcom,bus-master' not found in node\n");
+ rc = of_property_read_u32(dev->of_node, "qcom,bus-slave", &bus->slave);
+ dprintk(CVP_ERR, "'qcom,bus-slave' not found in node\n");
+ rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
+ &bus->governor);
+ "'qcom,bus-governor' not found, default to performance governor\n");
+ bus->governor = PERF_GOV;
+ if (!strcmp(bus->governor, PERF_GOV))
+ bus->is_prfm_gov_used = true;
+ rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
+ range, ARRAY_SIZE(range));
+ "'qcom,range' not found defaulting to <0 INT_MAX>\n");
+ range[0] = 0;
+ range[1] = INT_MAX;
+ bus->range[0] = range[0]; /* min */
+ bus->range[1] = range[1]; /* max */
+ buses->count++;
+ bus->dev = dev;
+ dprintk(CVP_CORE, "Found bus %s [%d->%d] with governor %s\n",
+ bus->name, bus->master, bus->slave, bus->governor);
+err_bus:
+static int msm_cvp_load_regulator_table(
+ struct regulator_set *regulators = &res->regulator_set;
+ struct device_node *domains_parent_node = NULL;
+ struct property *domains_property = NULL;
+ int reg_count = 0;
+ regulators->count = 0;
+ regulators->regulator_tbl = NULL;
+ domains_parent_node = pdev->dev.of_node;
+ for_each_property_of_node(domains_parent_node, domains_property) {
+ const char *search_string = "-supply";
+ char *supply;
+ bool matched = false;
+ /* check if current property is possibly a regulator */
+ supply = strnstr(domains_property->name, search_string,
+ strlen(domains_property->name) + 1);
+ matched = supply && (*(supply + strlen(search_string)) == '\0');
+ if (!matched)
+ reg_count++;
+ regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
+ sizeof(*regulators->regulator_tbl) *
+ reg_count, GFP_KERNEL);
+ if (!regulators->regulator_tbl) {
+ "Failed to alloc memory for regulator table\n");
+ goto err_reg_tbl_alloc;
+ struct device_node *regulator_node = NULL;
+ matched = supply && (supply[strlen(search_string)] == '\0');
+ /* make sure prop isn't being misused */
+ regulator_node = of_parse_phandle(domains_parent_node,
+ domains_property->name, 0);
+ if (IS_ERR(regulator_node)) {
+ dprintk(CVP_WARN, "%s is not a phandle\n",
+ domains_property->name);
+ regulators->count++;
+ /* populate regulator info */
+ rinfo = &regulators->regulator_tbl[regulators->count - 1];
+ rinfo->name = devm_kzalloc(&pdev->dev,
+ (supply - domains_property->name) + 1, GFP_KERNEL);
+ if (!rinfo->name) {
+ "Failed to alloc memory for regulator name\n");
+ goto err_reg_name_alloc;
+ strlcpy(rinfo->name, domains_property->name,
+ (supply - domains_property->name) + 1);
+ rinfo->has_hw_power_collapse = of_property_read_bool(
+ regulator_node, "qcom,support-hw-trigger");
+ dprintk(CVP_CORE, "Found regulator %s: h/w collapse = %s\n",
+ rinfo->name,
+ rinfo->has_hw_power_collapse ? "yes" : "no");
+ if (!regulators->count)
+ dprintk(CVP_CORE, "No regulators found");
+err_reg_name_alloc:
+err_reg_tbl_alloc:
+static int msm_cvp_load_clock_table(
+ int rc = 0, num_clocks = 0, c = 0;
+ int *clock_ids = NULL;
+ int *clock_props = NULL;
+ struct clock_set *clocks = &res->clock_set;
+ num_clocks = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (num_clocks <= 0) {
+ dprintk(CVP_CORE, "No clocks found\n");
+ clocks->count = 0;
+ goto err_load_clk_table_fail;
+ clock_ids = devm_kzalloc(&pdev->dev, num_clocks *
+ sizeof(*clock_ids), GFP_KERNEL);
+ if (!clock_ids) {
+ dprintk(CVP_ERR, "No memory to read clock ids\n");
+ rc = of_property_read_u32_array(pdev->dev.of_node,
+ "clock-ids", clock_ids,
+ num_clocks);
+ dprintk(CVP_CORE, "Failed to read clock ids: %d\n", rc);
+ msm_cvp_mmrm_enabled = false;
+ dprintk(CVP_CORE, "flag msm_cvp_mmrm_enabled disabled\n");
+ clock_props = devm_kzalloc(&pdev->dev, num_clocks *
+ sizeof(*clock_props), GFP_KERNEL);
+ if (!clock_props) {
+ dprintk(CVP_ERR, "No memory to read clock properties\n");
+ "qcom,clock-configs", clock_props,
+ dprintk(CVP_ERR, "Failed to read clock properties: %d\n", rc);
+ goto err_load_clk_prop_fail;
+ clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
+ * num_clocks, GFP_KERNEL);
+ if (!clocks->clock_tbl) {
+ dprintk(CVP_ERR, "Failed to allocate memory for clock tbl\n");
+ clocks->count = num_clocks;
+ dprintk(CVP_CORE, "Found %d clocks\n", num_clocks);
+ for (c = 0; c < num_clocks; ++c) {
+ struct clock_info *vc = &res->clock_set.clock_tbl[c];
+ "clock-names", c, &vc->name);
+ if (msm_cvp_mmrm_enabled == true)
+ vc->clk_id = clock_ids[c];
+ if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
+ vc->has_scaling = true;
+ vc->count = 0;
+ vc->has_scaling = false;
+ if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
+ vc->has_mem_retention = true;
+ vc->has_mem_retention = false;
+ dprintk(CVP_CORE, "Found clock %s id %d: scale-able = %s\n",
+ vc->name, vc->clk_id, vc->count ? "yes" : "no");
+err_load_clk_prop_fail:
+err_load_clk_table_fail:
+#define MAX_CLK_RESETS 5
+static int msm_cvp_load_reset_table(
+ struct reset_set *rst = &res->reset_set;
+ int num_clocks = 0, c = 0, ret = 0;
+ int pwr_stats[MAX_CLK_RESETS];
+ "reset-names");
+ if (num_clocks <= 0 || num_clocks > MAX_CLK_RESETS) {
+ dprintk(CVP_ERR, "Num reset clocks out of range\n");
+ rst->count = 0;
+ rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
+ sizeof(*rst->reset_tbl), GFP_KERNEL);
+ if (!rst->reset_tbl)
+ rst->count = num_clocks;
+ dprintk(CVP_CORE, "Found %d reset clocks\n", num_clocks);
+ "reset-power-status", pwr_stats,
+ dprintk(CVP_ERR, "Failed to read reset pwr state: %d\n", ret);
+ devm_kfree(&pdev->dev, rst->reset_tbl);
+ struct reset_info *rc = &res->reset_set.reset_tbl[c];
+ "reset-names", c, &rc->name);
+ rc->required_stage = pwr_stats[c];
+static int find_key_value(struct msm_cvp_platform_data *platform_data,
+ const char *key)
+ struct msm_cvp_common_data *common_data = platform_data->common_data;
+ int size = platform_data->common_data_length;
+ for (i = 0; i < size; i++) {
+ if (!strcmp(common_data[i].key, key))
+ return common_data[i].value;
+int cvp_read_platform_resources_from_drv_data(
+ if (!core || !core->platform_data) {
+ dprintk(CVP_ERR, "%s Invalid data\n", __func__);
+ platform_data = core->platform_data;
+ res = &core->resources;
+ res->sku_version = platform_data->sku_version;
+ res->dsp_enabled = find_key_value(platform_data,
+ "qcom,dsp-enabled");
+ res->max_ssr_allowed = find_key_value(platform_data,
+ "qcom,max-ssr-allowed");
+ res->sw_power_collapsible = find_key_value(platform_data,
+ "qcom,sw-power-collapse");
+ res->debug_timeout = find_key_value(platform_data,
+ "qcom,debug-timeout");
+ res->pm_qos.latency_us = find_key_value(platform_data,
+ "qcom,pm-qos-latency-us");
+ res->pm_qos.silver_count = 0;
+ for (i = 0; i < MAX_SILVER_CORE_NUM; i++) {
+ if (topology_cluster_id(i) == 0)
+ res->pm_qos.silver_count++;
+ for (i = 0; i < res->pm_qos.silver_count; i++)
+ res->pm_qos.silver_cores[i] = i;
+ res->pm_qos.off_vote_cnt = 0;
+ spin_lock_init(&res->pm_qos.lock);
+ res->max_secure_inst_count = find_key_value(platform_data,
+ "qcom,max-secure-instances");
+ res->thermal_mitigable = find_key_value(platform_data,
+ "qcom,enable-thermal-mitigation");
+ res->msm_cvp_pwr_collapse_delay = find_key_value(platform_data,
+ "qcom,power-collapse-delay");
+ res->msm_cvp_hw_rsp_timeout = find_key_value(platform_data,
+ "qcom,hw-resp-timeout");
+ res->msm_cvp_dsp_rsp_timeout = find_key_value(platform_data,
+ "qcom,dsp-resp-timeout");
+ res->non_fatal_pagefaults = find_key_value(platform_data,
+ "qcom,domain-attr-non-fatal-faults");
+ //Address offsets for QOS setting.
+ //There are diff between lanai and palawan for QOS register addresses
+ res->qos_noc_rge_niu_offset = find_key_value(platform_data,
+ "qcom,qos_noc_rge_niu_offset");
+ res->qos_noc_gce_vadl_tof_niu_offset = find_key_value(platform_data,
+ "qcom,qos_noc_gce_vadl_tof_niu_offset");
+ res->qos_noc_cdm_niu_offset = find_key_value(platform_data,
+ "qcom,qos_noc_cdm_niu_offset");
+ res->noc_core_err_offset = find_key_value(platform_data,
+ "qcom,noc_core_err_offset");
+ res->noc_main_sidebandmanager_offset = find_key_value(platform_data,
+ "qcom,noc_main_sidebandmanager_offset");
+ res->vpu_ver = platform_data->vpu_ver;
+ res->ubwc_config = platform_data->ubwc_config;
+ res->fatal_ssr = false;
+int cvp_read_platform_resources_from_dt(
+ struct resource *kres = NULL;
+ uint32_t firmware_base = 0;
+ if (!pdev->dev.of_node) {
+ dprintk(CVP_ERR, "DT node not found\n");
+ INIT_LIST_HEAD(&res->context_banks);
+ res->firmware_base = (phys_addr_t)firmware_base;
+ kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res->register_base = kres ? kres->start : -1;
+ res->register_size = kres ? (kres->end + 1 - kres->start) : -1;
+ res->irq = platform_get_irq(pdev, 0);
+ dprintk(CVP_CORE, "%s: res->irq:%d \n",
+ __func__, res->irq);
+ //Parsing for WD interrupt
+ res->irq_wd = platform_get_irq(pdev, 1);
+ dprintk(CVP_CORE, "%s: res->irq_wd:%d \n",
+ __func__, res->irq_wd);
+ rc = msm_cvp_load_fw_name(res);
+ dprintk(CVP_CORE, "Firmware filename: %s\n", res->fw_name);
+ dprintk(CVP_WARN, "Failed to load fw name info: %d\n", rc);
+ rc = msm_cvp_load_subcache_info(res);
+ dprintk(CVP_WARN, "Failed to load subcache info: %d\n", rc);
+ rc = msm_cvp_load_qdss_table(res);
+ dprintk(CVP_WARN, "Failed to load qdss reg table: %d\n", rc);
+ rc = msm_cvp_load_reg_table(res);
+ dprintk(CVP_ERR, "Failed to load reg table: %d\n", rc);
+ goto err_load_reg_table;
+ rc = msm_cvp_load_ipcc_regs(res);
+ dprintk(CVP_ERR, "Failed to load IPCC regs: %d\n", rc);
+ rc = msm_cvp_load_regspace_mapping(res);
+ dprintk(CVP_ERR, "Failed to load reg space mapping: %d\n", rc);
+ rc = msm_cvp_load_gcc_regs(res);
+ rc = msm_cvp_load_regulator_table(res);
+ dprintk(CVP_ERR, "Failed to load list of regulators %d\n", rc);
+ goto err_load_regulator_table;
+ rc = msm_cvp_load_clock_table(res);
+ "Failed to load clock table: %d\n", rc);
+ goto err_load_clock_table;
+ rc = msm_cvp_load_allowed_clocks_table(res);
+ "Failed to load allowed clocks table: %d\n", rc);
+ goto err_load_allowed_clocks_table;
+ rc = msm_cvp_load_reset_table(res);
+ "Failed to load reset table: %d\n", rc);
+ goto err_load_reset_table;
+ res->use_non_secure_pil = of_property_read_bool(pdev->dev.of_node,
+ "qcom,use-non-secure-pil");
+ if (res->use_non_secure_pil || !is_iommu_present(res)) {
+ of_property_read_u32(pdev->dev.of_node, "qcom,fw-bias",
+ &firmware_base);
+ "Using fw-bias : %pa", &res->firmware_base);
+ return rc;
+err_load_reset_table:
+err_load_allowed_clocks_table:
+err_load_clock_table:
+err_load_regulator_table:
+err_load_reg_table:
+static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
+ struct context_bank_info *cb, struct device *dev)
+ struct bus_type *bus;
+ if (!dev || !cb || !res) {
+ "%s: Invalid Input params\n", __func__);
+ cb->dev = dev;
+ bus = cb->dev->bus;
+ if (IS_ERR_OR_NULL(bus)) {
+ dprintk(CVP_ERR, "%s - failed to get bus type\n", __func__);
+ rc = PTR_ERR(bus) ?: -ENODEV;
+ goto remove_cb;
+ * configure device segment size and segment boundary to ensure
+ * iommu mapping returns one mapping (which is required for partial
+ * cache operations)
+ if (!dev->dma_parms)
+ dev->dma_parms =
+ devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_set_seg_boundary(dev, DMA_BIT_MASK(64));
+ dprintk(CVP_CORE, "Attached %s and created mapping\n", dev_name(dev));
+ "Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK",
+ cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
+ cb->addr_range.size, cb->dev);
+remove_cb:
+int msm_cvp_smmu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+ struct msm_cvp_core *core = token;
+ bool log = false;
+ if (!domain || !core) {
+ dprintk(CVP_ERR, "%s - invalid param %pK %pK\n",
+ __func__, domain, core);
+ dprintk(CVP_ERR, "%s - faulting address: %lx fault cnt %d\n",
+ __func__, iova, core->smmu_fault_count);
+ if (core->smmu_fault_count > 0) {
+ core->smmu_fault_count++;
+ return -ENOSYS;
+ if (!core->last_fault_addr)
+ core->last_fault_addr = iova;
+ log = (core->log.snapshot_index > 0) ? false : true;
+ msm_cvp_print_inst_bufs(inst, log);
+ if (hdev) {
+ hdev->error = CVP_ERR_NOC_ERROR;
+ call_hfi_op(core->dev_ops, debug_hook, hdev);
+ * Return -EINVAL to elicit the default behaviour of smmu driver.
+ * If we return -ENOSYS, then smmu driver assumes page fault handler
+ * is not installed and prints a list of useful debug information like
+ * FAR, SID etc. This information is not printed if we return 0.
+static int msm_cvp_populate_context_bank(struct device *dev,
+ struct device_node *np = NULL;
+ if (!dev || !core) {
+ dprintk(CVP_ERR, "%s - invalid inputs\n", __func__);
+ np = dev->of_node;
+ cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
+ dprintk(CVP_ERR, "%s - Failed to allocate cb\n", __func__);
+ rc = of_property_read_string(np, "label", &cb->name);
+ "Failed to read cb label from device tree\n");
+ INIT_LIST_HEAD(&cb->list);
+ list_add_tail(&cb->list, &core->resources.context_banks);
+ dprintk(CVP_CORE, "%s: context bank has name %s\n", __func__, cb->name);
+ if (!strcmp(cb->name, "cvp_camera")) {
+ cb->is_secure = true;
+ rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
+ dprintk(CVP_ERR, "Cannot setup context bank %s %d\n",
+ cb->name, rc);
+ goto err_setup_cb;
+ rc = of_property_read_u32_array(np, "qcom,iommu-dma-addr-pool",
+ (u32 *)&cb->addr_range, 2);
+ "Could not read addr pool for context bank : %s %d\n",
+ cb->is_secure = of_property_read_bool(np, "qcom,iommu-vmid");
+ dprintk(CVP_CORE, "context bank %s : secure = %d\n",
+ cb->name, cb->is_secure);
+ /* setup buffer type for each sub device*/
+ rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
+ dprintk(CVP_ERR, "failed to load buffer_type info %d\n", rc);
+ "context bank %s address start = %x address size = %x buffer_type = %x\n",
+ cb->name, cb->addr_range.start,
+ cb->addr_range.size, cb->buffer_type);
+ cb->domain = iommu_get_domain_for_dev(dev);
+ if (IS_ERR_OR_NULL(cb->domain)) {
+ dprintk(CVP_ERR, "Create domain failed\n");
+ dprintk(CVP_ERR, "Cannot setup context bank %d\n", rc);
+ iommu_set_fault_handler(cb->domain,
+ msm_cvp_smmu_fault_handler, (void *)core);
+err_setup_cb:
+ list_del(&cb->list);
+int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev)
+ dprintk(CVP_ERR, "Invalid platform device\n");
+ } else if (!pdev->dev.parent) {
+ dprintk(CVP_ERR, "Failed to find a parent for %s\n",
+ dev_name(&pdev->dev));
+ core = dev_get_drvdata(pdev->dev.parent);
+ dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
+ dev_name(pdev->dev.parent));
+ rc = msm_cvp_populate_context_bank(&pdev->dev, core);
+ dprintk(CVP_ERR, "Failed to probe context bank\n");
+ dprintk(CVP_CORE, "Successfully probed context bank\n");
+int cvp_read_bus_resources_from_dt(struct platform_device *pdev)
+ return msm_cvp_populate_bus(&pdev->dev, &core->resources);
+int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev)
+ dprintk(CVP_ERR, "%s: invalid platform device\n", __func__);
+ return msm_cvp_populate_mem_cdsp(&pdev->dev, &core->resources);
@@ -0,0 +1,30 @@
+#ifndef __MSM_CVP_RES_PARSE_H__
+#define __MSM_CVP_RES_PARSE_H__
+ struct msm_cvp_platform_resources *res);
+int read_hfi_type(struct platform_device *pdev);
+ struct msm_cvp_core *core);
+int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev);
+int cvp_read_bus_resources_from_dt(struct platform_device *pdev);
+int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev);
+ u32 **table, u32 *num_elements);
@@ -0,0 +1,232 @@
+#ifndef __MSM_CVP_RESOURCES_H__
+#define __MSM_CVP_RESOURCES_H__
+struct reg_value_pair {
+ u32 value;
+struct reg_set {
+ struct reg_value_pair *reg_tbl;
+ int count;
+struct addr_range {
+ u32 start;
+struct addr_set {
+ struct addr_range *addr_tbl;
+struct context_bank_info {
+ const char *name;
+ bool is_secure;
+ struct addr_range addr_range;
+struct regulator_info {
+ struct regulator *regulator;
+ bool has_hw_power_collapse;
+struct regulator_set {
+ struct regulator_info *regulator_tbl;
+ u32 count;
+struct clock_info {
+ u32 clk_id;
+ struct clk *clk;
+ bool has_scaling;
+ bool has_mem_retention;
+struct clock_set {
+ struct clock_info *clock_tbl;
+struct bus_info {
+ int master;
+ int slave;
+ unsigned int range[2];
+ const char *governor;
+ struct devfreq_dev_profile devfreq_prof;
+ struct devfreq *devfreq;
+ struct icc_path *client;
+ bool is_prfm_gov_used;
+struct bus_set {
+ struct bus_info *bus_tbl;
+enum action_stage {
+ CVP_ON_INIT,
+ CVP_ON_USE,
+ CVP_ON_INVALID,
+enum reset_clk_state {
+ RESET_INIT,
+ RESET_ACQUIRED,
+ RESET_RELEASED,
+struct reset_info {
+ enum action_stage required_stage;
+ enum reset_clk_state state;
+struct reset_set {
+ struct reset_info *reset_tbl;
+struct allowed_clock_rates_table {
+ u32 clock_rate;
+struct clock_profile_entry {
+ u32 codec_mask;
+ u32 vpp_cycles;
+ u32 vsp_cycles;
+ u32 low_power_cycles;
+struct clock_freq_table {
+ struct clock_profile_entry *clk_prof_entries;
+struct subcache_info {
+ bool isactive;
+ bool isset;
+ struct llcc_slice_desc *subcache;
+struct subcache_set {
+ struct subcache_info *subcache_tbl;
+struct msm_cvp_mem_cdsp {
+#define MAX_SILVER_CORE_NUM 8
+#define HFI_SESSION_FD 4
+#define HFI_SESSION_DMM 2
+struct cvp_pm_qos {
+ u32 silver_count;
+ u32 latency_us;
+ u32 off_vote_cnt;
+ int silver_cores[MAX_SILVER_CORE_NUM];
+ struct dev_pm_qos_request *pm_qos_hdls;
+struct cvp_fw_reg_mappings {
+ phys_addr_t ipclite_iova;
+ phys_addr_t ipclite_phyaddr;
+ uint32_t ipclite_size;
+ phys_addr_t hwmutex_iova;
+ phys_addr_t hwmutex_phyaddr;
+ uint32_t hwmutex_size;
+ phys_addr_t aon_iova;
+ phys_addr_t aon_phyaddr;
+ uint32_t aon_size;
+ phys_addr_t timer_iova;
+ phys_addr_t timer_phyaddr;
+ uint32_t timer_size;
+struct msm_cvp_platform_resources {
+ phys_addr_t register_base;
+ phys_addr_t ipcc_reg_base;
+ phys_addr_t gcc_reg_base;
+ uint32_t register_size;
+ uint32_t ipcc_reg_size;
+ uint32_t gcc_reg_size;
+ struct cvp_fw_reg_mappings reg_mappings;
+ uint32_t irq;
+ uint32_t irq_wd;
+ uint32_t sku_version;
+ struct allowed_clock_rates_table *allowed_clks_tbl;
+ u32 allowed_clks_tbl_size;
+ struct clock_freq_table clock_freq_tbl;
+ bool sys_cache_present;
+ bool sys_cache_res_set;
+ struct subcache_set subcache_set;
+ struct reg_set reg_set;
+ struct addr_set qdss_addr_set;
+ uint32_t max_ssr_allowed;
+ struct platform_device *pdev;
+ struct regulator_set regulator_set;
+ struct clock_set clock_set;
+ struct bus_set bus_set;
+ struct reset_set reset_set;
+ bool use_non_secure_pil;
+ bool sw_power_collapsible;
+ bool dsp_enabled;
+ struct list_head context_banks;
+ bool thermal_mitigable;
+ const char *fw_name;
+ const char *hfi_version;
+ bool debug_timeout;
+ struct cvp_pm_qos pm_qos;
+ uint32_t max_inst_count;
+ uint32_t max_secure_inst_count;
+ int msm_cvp_hw_rsp_timeout;
+ int msm_cvp_dsp_rsp_timeout;
+ uint32_t msm_cvp_pwr_collapse_delay;
+ bool non_fatal_pagefaults;
+ bool fatal_ssr;
+ struct msm_cvp_mem_cdsp mem_cdsp;
+ uint32_t fw_cycles;
+ uint32_t qos_noc_rge_niu_offset;
+ uint32_t qos_noc_gce_vadl_tof_niu_offset;
+ uint32_t qos_noc_cdm_niu_offset;
+ uint32_t noc_core_err_offset;
+ uint32_t noc_main_sidebandmanager_offset;
+static inline bool is_iommu_present(struct msm_cvp_platform_resources *res)
+ return !list_empty(&res->context_banks);
+int cvp_of_fdt_get_ddrtype(void);
@@ -0,0 +1,344 @@
+static int cvp_sess_init_synx_v2(struct msm_cvp_inst *inst)
+ struct synx_initialization_params params = { 0 };
+ params.name = "cvp-kernel-client";
+ params.id = SYNX_CLIENT_EVA_CTX0;
+ inst->synx_session_id = synx_initialize(&params);
+ if (IS_ERR_OR_NULL(&inst->synx_session_id)) {
+ dprintk(CVP_ERR, "%s synx_initialize failed\n", __func__);
+static int cvp_sess_deinit_synx_v2(struct msm_cvp_inst *inst)
+ dprintk(CVP_ERR, "Used invalid sess in deinit_synx\n");
+ synx_uninitialize(inst->synx_session_id);
+static void cvp_dump_fence_queue_v2(struct msm_cvp_inst *inst)
+ struct synx_session *ssid;
+ ssid = inst->synx_session_id;
+ dprintk(CVP_WARN, "inst %x fence q mode %d, ssid %pK\n",
+ hash32_ptr(inst->session), q->mode, ssid);
+ dprintk(CVP_WARN, "fence cmdq wait list:\n");
+ list_for_each_entry(f, &q->wait_list, list) {
+ dprintk(CVP_WARN, "frame pkt type 0x%x\n", f->pkt->packet_type);
+ for (i = 0; i < f->output_index; i++)
+ dprintk(CVP_WARN, "idx %d client hdl %d, state %d\n",
+ i, f->synx[i],
+ synx_get_status(ssid, f->synx[i]));
+ dprintk(CVP_WARN, "fence cmdq schedule list:\n");
+static int cvp_import_synx_v2(struct msm_cvp_inst *inst,
+ u32 *fence)
+ int rc = 0, rr = 0;
+ struct eva_kmd_fence *fs;
+ struct synx_import_params params = {0};
+ u32 h_synx;
+ fs = (struct eva_kmd_fence *)fence;
+ for (i = 0; i < fc->num_fences; ++i) {
+ h_synx = fs[i].h_synx;
+ if (h_synx) {
+ params.type = SYNX_IMPORT_INDV_PARAMS;
+ params.indv.fence = &h_synx;
+ params.indv.flags = SYNX_IMPORT_SYNX_FENCE
+ | SYNX_IMPORT_LOCAL_FENCE;
+ params.indv.new_h_synx = &fc->synx[i];
+ rc = synx_import(ssid, &params);
+ "%s: %u synx_import failed\n",
+ __func__, h_synx);
+ rr = rc;
+ return rr;
+static int cvp_release_synx_v2(struct msm_cvp_inst *inst,
+ struct cvp_fence_command *fc)
+ h_synx = fc->synx[i];
+ rc = synx_release(ssid, h_synx);
+ "%s: synx_release %d, %d failed\n",
+ __func__, h_synx, i);
+static int cvp_cancel_synx_impl(struct msm_cvp_inst *inst,
+ enum cvp_synx_type type,
+ int synx_state)
+ int start = 0, end = 0;
+ if (type == CVP_INPUT_SYNX) {
+ start = 0;
+ end = fc->output_index;
+ } else if (type == CVP_OUTPUT_SYNX) {
+ start = fc->output_index;
+ end = fc->num_fences;
+ dprintk(CVP_ERR, "%s Incorrect synx type\n", __func__);
+ for (i = start; i < end; ++i) {
+ rc = synx_signal(ssid, h_synx, synx_state);
+ dprintk(CVP_SYNX, "Cancel synx %d session %llx\n",
+ h_synx, inst);
+ "%s: synx_signal %d %d %d failed\n",
+ __func__, h_synx, i, synx_state);
+static int cvp_cancel_synx_v2(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+ struct cvp_fence_command *fc, int synx_state)
+ return cvp_cancel_synx_impl(inst, type, fc, synx_state);
+static int cvp_wait_synx(struct synx_session *ssid, u32 *synx, u32 num_synx,
+ u32 *synx_state)
+ unsigned long timeout_ms = 2000;
+ while (i < num_synx) {
+ h_synx = synx[i];
+ rc = synx_wait(ssid, h_synx, timeout_ms);
+ *synx_state = synx_get_status(ssid, h_synx);
+ if (*synx_state == SYNX_STATE_SIGNALED_SUCCESS)
+ dprintk(CVP_SYNX, "%s: SYNX SIGNAL STATE SUCCESS\n", __func__);
+ rc = 0;
+ else if (*synx_state == SYNX_STATE_SIGNALED_CANCEL) {
+ dprintk(CVP_SYNX,
+ "%s: synx_wait %d cancel %d state %d\n",
+ current->comm, i, rc, *synx_state);
+ "%s: synx_wait %d failed %d state %d\n",
+ *synx_state = SYNX_STATE_SIGNALED_CANCEL;
+ rc = 0; /* SYNX_STATE_SIGNALED_SUCCESS = 2 */
+ dprintk(CVP_SYNX, "Wait synx %u returned succes\n",
+ h_synx);
+ ++i;
+static int cvp_signal_synx(struct synx_session *ssid, u32 *synx, u32 num_synx,
+ u32 synx_state)
+ "%s: synx_signal %u %d failed\n",
+ current->comm, h_synx, i);
+ dprintk(CVP_SYNX, "Signaled synx %u state %d\n",
+ h_synx, synx_state);
+static int cvp_synx_ops_v2(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+ struct cvp_fence_command *fc, u32 *synx_state)
+ if (fc->signature == 0xB0BABABE)
+ return cvp_wait_synx(ssid, fc->synx, fc->output_index,
+ synx_state);
+ return cvp_signal_synx(ssid, &fc->synx[fc->output_index],
+ (fc->num_fences - fc->output_index),
+ *synx_state);
+ dprintk(CVP_ERR, "%s Incorrect SYNX type\n", __func__);
+static struct msm_cvp_synx_ops cvp_synx = {
+ .cvp_sess_init_synx = cvp_sess_init_synx_v2,
+ .cvp_sess_deinit_synx = cvp_sess_deinit_synx_v2,
+ .cvp_release_synx = cvp_release_synx_v2,
+ .cvp_import_synx = cvp_import_synx_v2,
+ .cvp_synx_ops = cvp_synx_ops_v2,
+ .cvp_cancel_synx = cvp_cancel_synx_v2,
+ .cvp_dump_fence_queue = cvp_dump_fence_queue_v2,
+static int cvp_sess_init_synx_stub(struct msm_cvp_inst *inst)
+static int cvp_sess_deinit_synx_stub(struct msm_cvp_inst *inst)
+static int cvp_release_synx_stub(struct msm_cvp_inst *inst,
+static int cvp_import_synx_stub(struct msm_cvp_inst *inst,
+static int cvp_synx_ops_stub(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+static int cvp_cancel_synx_stub(struct msm_cvp_inst *inst, enum cvp_synx_type type,
+static void cvp_dump_fence_queue_stub(struct msm_cvp_inst *inst)
+ .cvp_sess_init_synx = cvp_sess_init_synx_stub,
+ .cvp_sess_deinit_synx = cvp_sess_deinit_synx_stub,
+ .cvp_release_synx = cvp_release_synx_stub,
+ .cvp_import_synx = cvp_import_synx_stub,
+ .cvp_synx_ops = cvp_synx_ops_stub,
+ .cvp_cancel_synx = cvp_cancel_synx_stub,
+ .cvp_dump_fence_queue = cvp_dump_fence_queue_stub,
+void cvp_synx_ftbl_init(struct msm_cvp_core *core)
+ /* Synx API version check below if needed */
+ core->synx_ftbl = &cvp_synx;
@@ -0,0 +1,74 @@
+#ifndef _MSM_CVP_SYNX_H_
+#define _MSM_CVP_SYNX_H_
+#include <synx_api.h>
+#define SYNX_STATE_SIGNALED_SUCCESS 0
+#define SYNX_STATE_SIGNALED_ERROR 0
+#define SYNX_STATE_SIGNALED_CANCEL 0
+struct synx_session {
+ u32 client_id;
+#endif /* end of CVP_SYNX_ENABLED */
+struct msm_cvp_core;
+struct cvp_fence_queue {
+ struct list_head wait_list;
+ struct list_head sched_list;
+struct cvp_fence_command {
+ u64 frame_id;
+ u32 signature;
+ u32 num_fences;
+ u32 output_index;
+ u32 synx[MAX_HFI_FENCE_SIZE];
+enum cvp_synx_type {
+ CVP_UINIT_SYNX,
+ CVP_INPUT_SYNX,
+ CVP_OUTPUT_SYNX,
+ CVP_INVALID_SYNX,
+struct msm_cvp_synx_ops {
+ int (*cvp_sess_init_synx)(struct msm_cvp_inst *inst);
+ int (*cvp_sess_deinit_synx)(struct msm_cvp_inst *inst);
+ int (*cvp_release_synx)(struct msm_cvp_inst *inst,
+ struct cvp_fence_command *fc);
+ int (*cvp_import_synx)(struct msm_cvp_inst *inst,
+ u32 *fence);
+ int (*cvp_synx_ops)(struct msm_cvp_inst *inst,
+ u32 *synx_state);
+ int (*cvp_cancel_synx)(struct msm_cvp_inst *inst,
+ int synx_state);
+ void (*cvp_dump_fence_queue)(struct msm_cvp_inst *inst);
+void cvp_synx_ftbl_init(struct msm_cvp_core *core);
+#endif /* _MSM_CVP_SYNX_H_ */
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+#ifndef _CVP_VM_H_
+#define _CVP_VM_H_
+#include "cvp_vm_msgq.h"
+#include "cvp_vm_resource.h"
+enum cvp_vm_id {
+ VM_PRIMARY = 1,
+ VM_TRUSTED = 2,
+ VM_INVALID = 3,
+enum cvp_vm_state {
+ VM_STATE_INIT = 1,
+ VM_STATE_ACTIVE = 2,
+ VM_STATE_ERROR = 3,
+ VM_STATE_INVALID = 4,
+struct msm_cvp_vm_ops {
+ int (*vm_start)(struct msm_cvp_core *core);
+ int (*vm_init_reg_and_irq)(struct iris_hfi_device *device,
+struct msm_cvp_vm_manager {
+ enum cvp_vm_state vm_state;
+ enum cvp_vm_id vm_id;
+ struct cvp_msgq_drv *msgq_drv;
+ struct cvp_vm_resource *vm_rm;
+ struct msm_cvp_vm_ops *vm_ops;
+extern struct msm_cvp_vm_manager vm_manager;
@@ -0,0 +1,181 @@
+#include "cvp_vm.h"
+#define FIRMWARE_SIZE 0X00A00000
+static int msm_cvp_vm_start(struct msm_cvp_core *core);
+static int msm_cvp_vm_init_reg_and_irq(struct iris_hfi_device *device,
+static struct msm_cvp_vm_ops vm_ops = {
+ .vm_start = msm_cvp_vm_start,
+ .vm_init_reg_and_irq = msm_cvp_vm_init_reg_and_irq,
+struct msm_cvp_vm_manager vm_manager = {
+ .msgq_drv = &cvp_ipc_msgq,
+ .vm_rm = &cvp_vm_rm,
+ .vm_ops = &vm_ops,
+static int msm_cvp_vm_start(struct msm_cvp_core *core)
+ __func__, core,
+ (core == NULL) ? NULL : core->platform_data);
+ vm_manager.vm_id = core->platform_data->vm_id;
+static int __check_core_registered(struct iris_hfi_device *device,
+ phys_addr_t fw_addr, u8 *reg_addr, u32 reg_size,
+ phys_addr_t irq)
+ dprintk(CVP_INFO, "no device Registered\n");
+ cvp_hal_data = device->cvp_hal_data;
+ if (!cvp_hal_data)
+ if (cvp_hal_data->irq == irq &&
+ (CONTAINS(cvp_hal_data->firmware_base,
+ FIRMWARE_SIZE, fw_addr) ||
+ CONTAINS(fw_addr, FIRMWARE_SIZE,
+ cvp_hal_data->firmware_base) ||
+ CONTAINS(cvp_hal_data->register_base,
+ reg_size, reg_addr) ||
+ CONTAINS(reg_addr, reg_size,
+ cvp_hal_data->register_base) ||
+ OVERLAPS(cvp_hal_data->register_base,
+ reg_size, reg_addr, reg_size) ||
+ OVERLAPS(reg_addr, reg_size,
+ cvp_hal_data->register_base,
+ reg_size) ||
+ OVERLAPS(cvp_hal_data->firmware_base,
+ FIRMWARE_SIZE, fw_addr,
+ FIRMWARE_SIZE) ||
+ OVERLAPS(fw_addr, FIRMWARE_SIZE,
+ cvp_hal_data->firmware_base,
+ FIRMWARE_SIZE))) {
+ dprintk(CVP_INFO, "Device not registered\n");
+ struct cvp_hal_data *hal = NULL;
+ if (vm_manager.vm_id == VM_TRUSTED)
+ rc = __check_core_registered(device, res->firmware_base,
+ (u8 *)(uintptr_t)res->register_base,
+ res->register_size, res->irq);
+ dprintk(CVP_ERR, "Core present/Already added\n");
+ rc = -EEXIST;
+ hal = kzalloc(sizeof(*hal), GFP_KERNEL);
+ if (!hal) {
+ dprintk(CVP_ERR, "Failed to alloc\n");
+ hal->irq = res->irq;
+ hal->irq_wd = res->irq_wd;
+ hal->firmware_base = res->firmware_base;
+ hal->register_base = devm_ioremap(&res->pdev->dev,
+ res->register_base, res->register_size);
+ hal->register_size = res->register_size;
+ if (!hal->register_base) {
+ "could not map reg addr %pa of size %d\n",
+ &res->register_base, res->register_size);
+ goto error_irq_fail;
+ if (res->gcc_reg_base) {
+ hal->gcc_reg_base = devm_ioremap(&res->pdev->dev,
+ res->gcc_reg_base, res->gcc_reg_size);
+ hal->gcc_reg_size = res->gcc_reg_size;
+ if (!hal->gcc_reg_base)
+ "could not map gcc reg addr %pa of size %d\n",
+ &res->gcc_reg_base, res->gcc_reg_size);
+ device->cvp_hal_data = hal;
+ rc = request_threaded_irq(res->irq, cvp_hfi_isr, iris_hfi_core_work_handler,
+ IRQF_TRIGGER_HIGH, "msm_cvp", device);
+ if (unlikely(rc)) {
+ dprintk(CVP_ERR, "%s: request_irq failed rc: %d\n", __func__, rc);
+ rc = request_irq(res->irq_wd, iris_hfi_isr_wd, IRQF_TRIGGER_HIGH,
+ "msm_cvp", device);
+ dprintk(CVP_ERR, "() :request_irq for WD failed\n");
+ disable_irq_nosync(res->irq);
+ "firmware_base = %pa, register_base = %pa, register_size = %d\n",
+ &res->firmware_base, &res->register_base,
+ res->register_size);
+error_irq_fail:
+ kfree(hal);
@@ -0,0 +1,341 @@
+ * cvp_msgq_receiver - thread function that receive msg from gunyah msgq
+ * data: cvp_msgq_drv pointer
+ * Note: single thread. If the sub-function or global data used in this
+ * function is also used somewhere else, please add rx_lock.
+static int cvp_msgq_receiver(void *data)
+ struct cvp_msgq_drv *msgq_drv = data;
+ struct cvp_ipc_msg *msg_ptr;
+ size_t size;
+ bool is_resp;
+ * true: response received from remote VM, cmd initiated from LOCAL VM;
+ * false: cmd initiated from REMOTE VM;
+ int rc = -1;
+ if (IS_ERR_OR_NULL(msgq_drv))
+ msg_ptr = kzalloc(sizeof(*msg_ptr), GFP_KERNEL);
+ if (!msg_ptr) {
+ dprintk(CVP_ERR, "%s: fail to allocate mem\n", __func__);
+ while (true) {
+ rc = gh_msgq_recv(msgq_drv->config.handle, msg_ptr,
+ sizeof(*msg_ptr), &size, 0);
+ if (rc != 0) {
+ "%s: gh_msgq_recv fail rc=%d handle=%#x msg_ptr=%#x\n",
+ __func__, rc, msgq_drv->config.handle, msg_ptr);
+ if (rc != -EAGAIN) {
+ kfree(msg_ptr);
+ is_resp = (msg_ptr->type &
+ CVP_IPC_MSG_TYPE_DIR_CHECK) ? true : false;
+ if (is_resp == false) {
+ dprintk(CVP_VM,
+ "%s: gh_msgq_recv cmd from remote VM\n",
+ if (msgq_drv->pending_local_cmd.type == 0) {
+ /* copy ipc message to local cmd */
+ memcpy(&msgq_drv->pending_local_cmd,
+ msg_ptr, sizeof(struct cvp_ipc_msg));
+ /* toggle the direction bit*/
+ msgq_drv->pending_local_cmd.type ^=
+ CVP_IPC_MSG_TYPE_DIR_CHECK;
+ /* TODO: call client function ptr to process */
+ memcpy(msg_ptr, &msgq_drv->pending_local_cmd,
+ sizeof(struct cvp_ipc_msg));
+ /* 4: number of header elements before the actual data in cvp_ipc_msg */
+ size = (4 + msgq_drv->pending_local_cmd.len) << 2;
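+ /* e.g. (illustrative) len = 8 payload words -> size = (4 + 8) << 2 = 48 bytes */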
+ /* sanity check on size information */
+ if (size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+ "%s: msg size %d exceed max size supported %d \n",
+ __func__, size, GH_MSGQ_MAX_MSG_SIZE_BYTES);
+ rc = -E2BIG;
+ msgq_drv->pending_local_cmd.type = 0;
+ /* send it back to the remote VM as response */
+ rc = gh_msgq_send(msgq_drv->config.handle,
+ msg_ptr, size, GH_MSGQ_TX_PUSH);
+ "%s: failed gh_msgq_send rc %d \n",
+ /* flag the source is released */
+ "%s: Msg rejected, local cmd in use type %d\n",
+ __func__, msgq_drv->pending_local_cmd.type);
+ "%s: gh_msgq_recv respond type from remote VM\n",
+ if ((msg_ptr->type & CVP_IPC_MSG_TYPE_ACT_CHECK) !=
+ msgq_drv->pending_remote_rsp.type) {
+ "%s: Msg disgard,recv type %d, pend local %d\n",
+ __func__, msg_ptr->type,
+ msgq_drv->pending_remote_rsp.type);
+ /* memcpy received data to pending_remote_rsp */
+ memcpy(&msgq_drv->pending_remote_rsp, msg_ptr,
+ /* clear direction bit of pending_remote_rsp */
+ msgq_drv->pending_remote_rsp.type &=
+ (~CVP_IPC_MSG_TYPE_DIR_CHECK);
+ /* complete for cmd initiated from local VM */
+ complete(&msgq_drv->completions[
+ msgq_drv->pending_remote_rsp.type - 1]);
+static int cvp_complete_msgq_init(struct cvp_msgq_drv *msgq_drv)
+ msgq_drv->receiver_thread = kthread_run(
+ cvp_msgq_receiver,
+ (void *)msgq_drv,
+ "CVP msgq receiver");
+ if (IS_ERR_OR_NULL(msgq_drv->receiver_thread)) {
+ dprintk(CVP_ERR, "Failed to start msgq receiver thread\n");
+ mutex_init(&msgq_drv->ipc_lock);
+ for (i = 0; i <= (CVP_MAX_IPC_CMD - 1); i++)
+ init_completion(&msgq_drv->completions[i]);
+#ifndef CONFIG_EVA_TVM
+static int cvp_msgq_cb(struct notifier_block *nb,
+ unsigned long cmd, void *data)
+ struct gh_rm_notif_vm_status_payload *vm_status_payload;
+ struct cvp_gh_msgq_config *msgq_config;
+ gh_vmid_t peer_vmid;
+ gh_vmid_t self_vmid;
+ if (IS_ERR_OR_NULL(nb))
+ msgq_config = container_of(nb, struct cvp_gh_msgq_config, rm_nb);
+ msgq_drv = container_of(msgq_config, struct cvp_msgq_drv, config);
+ if (cmd != GH_RM_NOTIF_VM_STATUS)
+ return NOTIFY_DONE;
+ * Check VM status; only a GH_TRUSTED_VM notification activates
+ * Gunyah msgq registration.
+ vm_status_payload = (struct gh_rm_notif_vm_status_payload *)data;
+ if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY)
+ return -12;
+ if (ghd_rm_get_vmid(msgq_config->peer_id, &peer_vmid))
+ return -13;
+ if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+ return -14;
+ if (peer_vmid != vm_status_payload->vmid)
+ dprintk(CVP_VM, "%s: vmid=%d, peer_vmid=%d\n",
+ __func__, vm_status_payload->vmid, peer_vmid);
+ if (msgq_config->handle)
+ return -15;
+ msgq_config->handle = gh_msgq_register(GH_MSGQ_LABEL_EVA);
+ if (IS_ERR(msgq_config->handle)) {
+ rc = PTR_ERR(msgq_drv->config.handle);
+ dprintk(CVP_ERR, "PVM failed to register msgq %d\n", rc);
+ dprintk(CVP_VM, "%s: gh_msgq_register handle: %x\n",
+ __func__, msgq_config->handle);
+ rc = cvp_complete_msgq_init(msgq_drv);
+static int cvp_msgq_init(struct cvp_msgq_drv *msgq_drv)
+ msgq_drv->config.label = GH_MSGQ_LABEL_EVA;
+ msgq_drv->config.handle = NULL;
+ /* PVM init */
+ msgq_drv->config.peer_id = GH_TRUSTED_VM;
+ msgq_drv->config.rm_nb.notifier_call = cvp_msgq_cb;
+ rc = gh_rm_register_notifier(&msgq_drv->config.rm_nb);
+ dprintk(CVP_ERR, "PVM Fail register msgq notifier %d\n", rc);
+ dprintk(CVP_VM, "%s: gh_rm_register_notifier\n", __func__);
+ /* TVM init */
+ msgq_drv->config.handle = gh_msgq_register(GH_MSGQ_LABEL_EVA);
+ if (IS_ERR(msgq_drv->config.handle)) {
+ dprintk(CVP_ERR, "TVM failed to register msgq %d\n", rc);
+static int cvp_msgq_deinit(struct cvp_msgq_drv *msgq_drv)
+ if (msgq_drv->receiver_thread)
+ kthread_stop(msgq_drv->receiver_thread);
+static int cvp_msgq_send_cmd(struct cvp_msgq_drv *msgq_drv,
+ void *msg, size_t msg_size)
+ struct cvp_ipc_msg *msg_ptr = msg;
+ if (!msgq_drv->config.handle) {
+ dprintk(CVP_ERR, "%s: Invalid msgq handle\n", __func__);
+ goto err_param_check;
+ if (msg_size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
+ __func__, msg_size, GH_MSGQ_MAX_MSG_SIZE_BYTES);
+ mutex_lock(&msgq_drv->ipc_lock);
+ /* init case: only allow sending msg sequentially */
+ if (msgq_drv->pending_remote_rsp.type &
+ CVP_IPC_MSG_TYPE_ACT_CHECK) {
+ rc = -EPERM;
+ "%s: Msg rejected, local rsp occupied.\n",
+ goto err_valid_check;
+ /* bookkeeping: store type bits in pending_remote_rsp */
+ msgq_drv->pending_remote_rsp.type = msg_ptr->type;
+ msg_ptr, msg_size, GH_MSGQ_TX_PUSH);
+ "%s: failed with gh_msgq_send with rc %d \n",
+ goto err_gh_send;
+ /* wait for completion */
+ &msgq_drv->completions[msgq_drv->pending_remote_rsp.type - 1],
+ msecs_to_jiffies(CVP_VM_RESPONSE_TIMEOUT))) {
+ dprintk(CVP_ERR, "%s cvp ipc msg type %d timeout\n",
+ __func__, msgq_drv->pending_remote_rsp.type - 1);
+ /* copy pending_remote_rsp content to msg (inout param) */
+ memcpy(msg, &msgq_drv->pending_remote_rsp,
+ /* clear type bits to indicate the resource is available */
+ msgq_drv->pending_remote_rsp.type = 0;
+ mutex_unlock(&msgq_drv->ipc_lock);
+err_gh_send:
+err_valid_check:
+err_param_check:
+static struct cvp_msgq_ops msgq_ops = {
+ .msgq_init = cvp_msgq_init,
+ .msgq_deinit = cvp_msgq_deinit,
+ .msgq_send = cvp_msgq_send_cmd,
+ .msgq_receive = NULL,
+struct cvp_msgq_drv cvp_ipc_msgq = {
+ .ops = &msgq_ops,
@@ -0,0 +1,77 @@
+#ifndef _CVP_VM_MSGQ_H_
+#define _CVP_VM_MSGQ_H_
+#include <linux/gunyah/gh_msgq.h>
+#define MAX_CVP_IPC_LEN 16
+#define CVP_VM_RESPONSE_TIMEOUT 300
+#define CVP_IPC_MSG_TYPE_DIR_CHECK 0x10000000 /* direction check */
+#define CVP_IPC_MSG_TYPE_ACT_CHECK 0x00000011 /* action check */
+enum CVP_IPC_MSG_TYPE {
+ REQUEST_SESS_CTRL = 1,
+ RELEASE_SESS_CTRL = 2,
+ REQUEST_EVA_RESET = 3,
+ RECLAIM_SESS_CTRL = 4, /* Only PVM can reclaim session control */
+ CVP_MAX_IPC_CMD = 5,
+struct cvp_ipc_msg {
+ /* type format:
+ * bit 28: 0->Initiated command; 1->Response to remote command
+ * bits 2~0: CVP_IPC_MSG_TYPE
+ uint32_t resv;
+ uint32_t data[MAX_CVP_IPC_LEN];
+struct cvp_gh_msgq_config {
+ int peer_id;
+ int label;
+ void *handle;
+ struct notifier_block rm_nb;
+struct cvp_msgq_ops;
+struct cvp_msgq_drv {
+ struct mutex ipc_lock; /* Mutex for sending MSG */
+ struct cvp_gh_msgq_config config;
+ struct task_struct *receiver_thread;
+ struct completion completions[CVP_MAX_IPC_CMD + 1];
+ * pending_local_cmd: the command being processed locally.
+ * The command is a request sent from the remote VM.
+ struct cvp_ipc_msg pending_local_cmd;
+ * pending_remote_rsp: the command being processed remotely.
+ * The command is a request sent by the local VM.
+ struct cvp_ipc_msg pending_remote_rsp;
+ struct cvp_msgq_ops *ops;
+struct cvp_msgq_ops {
+ int (*msgq_init)(struct cvp_msgq_drv *msgq_drv);
+ int (*msgq_send)(struct cvp_msgq_drv *msgq_drv, void *msg,
+ size_t msg_size);
+ int (*msgq_receive)(struct cvp_msgq_drv *msgq_drv);
+ int (*msgq_deinit)(struct cvp_msgq_drv *msgq_drv);
+extern struct cvp_msgq_drv cvp_ipc_msgq;
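The type field carries both the action (one of enum CVP_IPC_MSG_TYPE in the low bits) and a direction flag (CVP_IPC_MSG_TYPE_DIR_CHECK); the per-command completion is then indexed by the action value minus one. The helpers below are an illustrative sketch of that bookkeeping, mirroring what the receiver thread does inline; they are not part of the driver:

/* Illustrative helpers mirroring the receiver's type-field handling. */
static inline bool cvp_msg_is_response(const struct cvp_ipc_msg *msg)
{
	/* Direction bit set => this message answers a command we initiated. */
	return !!(msg->type & CVP_IPC_MSG_TYPE_DIR_CHECK);
}

static inline int cvp_msg_completion_index(const struct cvp_ipc_msg *msg)
{
	/* Action values start at 1 (REQUEST_SESS_CTRL); completions[] starts at 0. */
	return (int)(msg->type & ~CVP_IPC_MSG_TYPE_DIR_CHECK) - 1;
}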
@@ -0,0 +1,8 @@
+struct cvp_vm_resource cvp_vm_rm;
+#ifndef _CVP_VM_RESOURCE_H_
+#define _CVP_VM_RESOURCE_H_
+struct cvp_vm_resource {
+ int reserved;
+extern struct cvp_vm_resource cvp_vm_rm;
+load(":eva_modules.bzl", "eva_modules")
+load(":eva_module_build.bzl", "define_consolidate_gki_modules")
+def define_pineapple():
+ define_consolidate_gki_modules(
+ target = "pineapple",
+ registry = eva_modules,
+ modules = [
+ "msm-eva",
+ config_options = [
+ #"CONFIG_TARGET_SYNX_ENABLE",
+ "TARGET_SYNX_ENABLE",
+ "TARGET_DSP_ENABLE",
+ "CONFIG_EVA_PINEAPPLE"