View source code

Merge 08b81d21083504db35a4ff949c5f75695116e3cc on remote branch

Change-Id: Ib8348135121f6f7f1505a31cca7324768fd3f9a4
Linux Build Service Account 2 years ago
Parent
Commit
2b7fe69bbb

+ 6 - 0
Android.bp

@@ -40,3 +40,9 @@ cc_library_headers {
     vendor: true,
     recovery_available: true
 }
+
+cc_library_headers {
+    name: "smmu_proxy_uapi_header",
+    vendor_available: true,
+    export_include_dirs: ["smmu-proxy/uapi/"],
+}

+ 13 - 0
Android.mk

@@ -122,4 +122,17 @@ include $(DLKM_DIR)/Build_external_kernelmodule.mk
 endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO
 ###################################################
 ###################################################
+ifeq ($(TARGET_USES_SMMU_PROXY), true)
+include $(CLEAR_VARS)
+#LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/smmu-proxy/ $(LOCAL_PATH)/
+LOCAL_MODULE              := smmu_proxy_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := smmu_proxy_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif
+###################################################
+###################################################
 endif #COMPILE_SECUREMSM_DLKM check

+ 31 - 0
BUILD.bazel

@@ -0,0 +1,31 @@
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+ddk_headers(
+    name = "smcinvoke_kernel_headers",
+    hdrs = glob([
+        "include/linux/smcinvoke*.h",
+        "include/linux/IClientE*.h",
+        "linux/misc/qseecom_kernel.h",
+        "linux/misc/qseecom_priv.h"
+    ]),
+    includes = ["include/linux", "linux", "include"]
+)
+
+ddk_headers(
+    name = "securemsm_kernel_headers",
+    hdrs = [
+        "linux/misc/qseecom_kernel.h",
+        "linux/misc/qseecom_priv.h"
+    ],
+    includes = ["linux"]
+)
+
+load("pineapple.bzl", "define_pineapple")
+
+define_pineapple()

+ 13 - 0
Kbuild

@@ -39,3 +39,16 @@ hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o
 
 obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += qrng_dlkm.o
 qrng_dlkm-objs := qrng/msm_rng.o
+
+ifneq (, $(filter y, $(ARCH_QTI_VM) $(CONFIG_ARCH_PINEAPPLE)))
+    include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smmu_proxy.conf
+    LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smmu_proxy.h
+
+    obj-$(CONFIG_QTI_SMMU_PROXY) += smmu_proxy_dlkm.o
+    smmu_proxy_dlkm-objs := smmu-proxy/qti-smmu-proxy-common.o
+    ifneq ($(CONFIG_ARCH_QTI_VM), y)
+    smmu_proxy_dlkm-objs += smmu-proxy/qti-smmu-proxy-pvm.o
+    else
+    smmu_proxy_dlkm-objs += smmu-proxy/qti-smmu-proxy-tvm.o
+    endif
+endif

+ 1 - 0
config/sec-kernel_defconfig_smmu_proxy.conf

@@ -0,0 +1 @@
+export CONFIG_QTI_SMMU_PROXY=m

+ 6 - 0
config/sec-kernel_defconfig_smmu_proxy.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define  CONFIG_QTI_SMMU_PROXY 1

+ 12 - 0
pineapple.bzl

@@ -0,0 +1,12 @@
+load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
+
+def define_pineapple():
+    define_consolidate_gki_modules(
+        target = "pineapple",
+        modules = [
+            "smcinvoke_dlkm",
+            "tz_log_dlkm",
+        ],
+        extra_options = [
+            "CONFIG_QCOM_SMCINVOKE"]
+    )

+ 20 - 2
qseecom/qseecom.c

@@ -46,6 +46,7 @@
 #include <linux/of_reserved_mem.h>
 #include <linux/qtee_shmbridge.h>
 #include <linux/mem-buf.h>
+#include <linux/version.h>
 #include "ice.h"
 #if IS_ENABLED(CONFIG_QSEECOM_PROXY)
 #include <linux/qseecom_kernel.h>
@@ -54,6 +55,10 @@
 #include "misc/qseecom_kernel.h"
 #endif
 
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
+#define KERNEL_VERSION_LEGACY
+#endif
+
 #define QSEECOM_DEV			"qseecom"
 #define QSEOS_VERSION_14		0x14
 #define QSEEE_VERSION_00		0x400000
@@ -1382,7 +1387,11 @@ static int qseecom_vaddr_map(int ion_fd,
 {
 	struct dma_buf *new_dma_buf = NULL;
 	struct dma_buf_attachment *new_attach = NULL;
+#ifdef KERNEL_VERSION_LEGACY
 	struct dma_buf_map new_dma_buf_map = {0};
+#else
+	struct iosys_map new_dma_buf_map = {0};
+#endif
 	struct sg_table *new_sgt = NULL;
 	void *new_va = NULL;
 	int ret = 0;
@@ -1424,11 +1433,15 @@ static void qseecom_vaddr_unmap(void *vaddr, struct sg_table *sgt,
 		struct dma_buf_attachment *attach,
 		struct dma_buf *dmabuf)
 {
-   struct dma_buf_map  dmabufmap = DMA_BUF_MAP_INIT_VADDR(vaddr);
+#ifdef KERNEL_VERSION_LEGACY
+	struct dma_buf_map  dmabufmap = DMA_BUF_MAP_INIT_VADDR(vaddr);
+#else
+	struct iosys_map  dmabufmap = IOSYS_MAP_INIT_VADDR(vaddr);
+#endif
 
 	if (!dmabuf || !vaddr || !sgt || !attach)
 		return;
-	pr_err("SMITA trying to unmap vaddr");
+	pr_err("Trying to unmap vaddr");
 	dma_buf_vunmap(dmabuf, &dmabufmap);
 	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
 	qseecom_dmabuf_unmap(sgt, attach, dmabuf);
@@ -2605,7 +2618,9 @@ err_resp:
 					data->client.app_name, resp->data);
 				goto exit;
 			}
+			fallthrough;
 		case QSEOS_RESULT_SUCCESS:
+			break;
 		case QSEOS_RESULT_INCOMPLETE:
 			break;
 		case QSEOS_RESULT_CBACK_REQUEST:
@@ -3657,6 +3672,7 @@ static int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
 			data->client.app_id, data->client.app_name, resp->data);
 			return ret;
 		}
+		fallthrough;
 		/* fall through to process incomplete request */
 	case QSEOS_RESULT_INCOMPLETE:
 		qseecom.app_block_ref_cnt++;
@@ -3676,6 +3692,7 @@ static int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
 						resp->result);
 		return -EINVAL;
 	}
+	return ret;
 }
 
 static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
@@ -9837,6 +9854,7 @@ static void qseecom_exit(void)
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
+MODULE_IMPORT_NS(DMA_BUF);
 
 module_init(qseecom_init);
 module_exit(qseecom_exit);
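
The hunks above switch the dma-buf vmap bookkeeping type by kernel version: up to 6.0 the structure is struct dma_buf_map, afterwards it is struct iosys_map. A minimal hedged sketch of the same compatibility pattern (the typedef and helper name are illustrative, not part of this driver):

#include <linux/dma-buf.h>
#include <linux/version.h>

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6, 0, 0))
typedef struct dma_buf_map example_map_t;	/* pre-6.1 name */
#else
typedef struct iosys_map example_map_t;		/* later kernels renamed it */
#endif

/* vmap a dma-buf and return its kernel virtual address, or NULL on failure */
static void *example_vmap(struct dma_buf *dmabuf, example_map_t *map)
{
	if (dma_buf_vmap(dmabuf, map))
		return NULL;
	return map->vaddr;
}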

+ 107 - 0
securemsm_kernel.bzl

@@ -0,0 +1,107 @@
+load("//build/kernel/kleaf:kernel.bzl", "kernel_modules_install",
+                                        "ddk_module")
+load(":securemsm_modules.bzl", "securemsm_modules",
+                           "securemsm_modules_by_config")
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+
+def _replace_formatting_codes(target, variant, s):
+    kernel_build = "{}_{}".format(target, variant)
+
+    return s.replace("%b", kernel_build).replace("%t", target)
+
+def _console_print(target, variant, module, message):
+    if module:
+        print('{}: {}: securemsm-kernel: {}: {}'.format(target, variant, module, message))
+    else:
+        print('{}: {}: securemsm-kernel: {} '.format(target, variant, message))
+
+def _get_options(target, variant, target_config_option, modules, extra_options):
+    all_options = {option: True for option in extra_options}
+
+    redundant_options = []
+
+    for option in securemsm_modules_by_config:
+        module_name = securemsm_modules_by_config[option]
+
+        if option in all_options:
+            if module_name in modules:
+                redundant_options.append(option)
+            else:
+                _console_print(target, variant, None, 'WARNING: Config option "{}" corresponds to securemsm module {}, but this module is not listed in module list!'.format(option, module_name))
+        else:
+            all_options[option] = True
+
+    if target_config_option in all_options:
+        redundant_options.append(target_config_option)
+    else:
+        all_options[target_config_option] = True
+
+    if redundant_options:
+        _console_print(target, variant, None, 'INFO: The following options are already declared either by a module or the target, no need to redeclare: \n{}'.format('\n'.join(redundant_options)))
+
+    return all_options
+
+def _get_module_srcs(target, variant, module, options):
+    srcs = [] + module["default_srcs"] + module["srcs"]
+    module_path = "{}/".format(module["path"]) if module["path"] else ""
+
+    for option in module["config_srcs"]:
+        srcs.extend(module["config_srcs"][option].get(option in options, []))
+
+    globbed_srcs = native.glob(["{}{}".format(module_path, _replace_formatting_codes(target, variant, src)) for src in srcs])
+
+    if not globbed_srcs:
+        _console_print(target, variant, module["name"], 'WARNING: Module has no sources attached!')
+
+    return globbed_srcs
+
+def define_target_variant_modules(target, variant, modules, extra_options = [], config_option = None):
+    kernel_build_variant = "{}_{}".format(target, variant)
+    options = _get_options(target, variant, config_option, modules, extra_options)
+    module_rules = []
+    target_local_defines = []
+    modules = [securemsm_modules[module_name] for module_name in modules]
+    tv = "{}_{}".format(target, variant)
+
+    target_local_defines = ["SMCINVOKE_TRACE_INCLUDE_PATH=../../../{}/smcinvoke".format(native.package_name())]
+
+    for config in extra_options:
+        target_local_defines.append(config)
+    for module in modules:
+        rule_name = "{}_{}".format(kernel_build_variant, module["name"])
+        module_srcs = _get_module_srcs(target, variant, module, options)
+
+        ddk_module(
+            name = rule_name,
+            kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
+            srcs = module_srcs,
+            out = "{}.ko".format(module["name"]),
+            deps = ["//msm-kernel:all_headers"] + [_replace_formatting_codes(target, variant, dep) for dep in module["deps"]],
+            hdrs = module["hdrs"],
+            local_defines = target_local_defines,
+            copts = module["copts"]
+
+        )
+        module_rules.append(rule_name)
+
+    copy_to_dist_dir(
+          name = "{}_securemsm-kernel_dist".format(kernel_build_variant),
+          data = module_rules,
+          dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target),
+          flat = True,
+          wipe_dist_dir = False,
+          allow_duplicate_filenames = False,
+          mode_overrides = {"**/*": "644"},
+          log = "info",
+    )
+
+
+    kernel_modules_install(
+        name = "{}_modules_install".format(kernel_build_variant),
+        kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
+        kernel_modules = module_rules
+    )
+
+def define_consolidate_gki_modules(target, modules, extra_options = [], config_option = None):
+    define_target_variant_modules(target, "consolidate", modules, extra_options, config_option)
+    define_target_variant_modules(target, "gki", modules, extra_options, config_option)

+ 4 - 0
securemsm_kernel_product_board.mk

@@ -17,6 +17,10 @@ PRODUCT_PACKAGES += hdcp_qseecom_dlkm.ko
 PRODUCT_PACKAGES += qrng_dlkm.ko
 PRODUCT_PACKAGES += smcinvoke_dlkm.ko
 
+ifeq ($(TARGET_USES_SMMU_PROXY), true)
+PRODUCT_PACKAGES += smmu_proxy_dlkm.ko
+endif
+
 #Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
 ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
 PRODUCT_PACKAGES += qseecom_dlkm.ko

+ 4 - 0
securemsm_kernel_vendor_board.mk

@@ -24,6 +24,10 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
 BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
 BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
 
+ifeq ($(TARGET_USES_SMMU_PROXY), true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smmu_proxy_dlkm.ko
+endif
+
 #Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
 ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko

+ 78 - 0
securemsm_modules.bzl

@@ -0,0 +1,78 @@
+SMCINVOKE_PATH = "smcinvoke"
+QSEECOM_PATH = "qseecom"
+TZLOG_PATH = "tz_log"
+
+# This dictionary holds all the securemsm-kernel modules included by calling register_securemsm_module
+securemsm_modules = {}
+securemsm_modules_by_config = {}
+
+# Registers securemsm module to kernel build system.
+# name: The name of the module. The name of the file generated for this module will be {name}.ko.
+# path: The path that will be prepended to all sources listed for this module.
+# config_option: If this module is enabled, the config option that will be enabled along with it. Not all modules have this; it is an optional parameter.
+# config_srcs: A dictionary of sources to be added to the module depending on whether a configuration option is enabled. The keys of the dictionary are
+# the names of config options, and the handling of a value depends on its type. If it is a list, it is simply the list of sources added to the module when
+# the config option is enabled. If the value is another dictionary, sources to be added when the config option is DISABLED can be listed under its False key.
+# default_srcs: A list of sources to be added to the module regardless of configuration options.
+# deps: A list of kernel_module or ddk_module rules that this module depends on.
+
+def register_securemsm_module(name, path = None, config_option = None, default_srcs = [], config_srcs = {}, deps = [], srcs = [], copts = [], hdrs = []):
+    processed_config_srcs = {}
+
+    for config_src_name in config_srcs:
+        config_src = config_srcs[config_src_name]
+
+        if type(config_src) == "list":
+            processed_config_srcs[config_src_name] = { True: config_src }
+        else:
+            processed_config_srcs[config_src_name] = config_src
+
+    module = {
+        "name": name,
+        "path": path,
+        "default_srcs": default_srcs,
+        "config_srcs": processed_config_srcs,
+        "config_option": config_option,
+        "deps": deps,
+        "copts": copts,
+        "srcs": srcs,
+        "hdrs": hdrs,
+    }
+
+    securemsm_modules[name] = module
+
+    if config_option:
+        securemsm_modules_by_config[config_option] = name
+
+
+# ------------------------------------ SECUREMSM MODULE DEFINITIONS ---------------------------------
+register_securemsm_module(
+    name = "smcinvoke_dlkm",
+    path = SMCINVOKE_PATH,
+    default_srcs = [
+            "smcinvoke.c",
+            "smcinvoke_kernel.c",
+            "trace_smcinvoke.h",
+            "IQSEEComCompat.h",
+            "IQSEEComCompatAppLoader.h",
+
+    ],
+    deps = [":smcinvoke_kernel_headers"],
+    hdrs = [":smcinvoke_kernel_headers"],
+)
+
+register_securemsm_module(
+    name = "qseecom_dlkm",
+    path = QSEECOM_PATH,
+    default_srcs = ["qseecom.c",
+                    "ice.h"],
+    deps = [":securemsm_kernel_headers"],
+    srcs = ["config/sec-kernel_defconfig_qseecom.h"],
+    copts = ["-include", "config/sec-kernel_defconfig_qseecom.h"],
+)
+
+register_securemsm_module(
+    name = "tz_log_dlkm",
+    path = TZLOG_PATH,
+    default_srcs = ["tz_log.c"],
+)

+ 99 - 60
smcinvoke/smcinvoke.c

@@ -377,7 +377,8 @@ struct smcinvoke_worker_thread {
 static struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER];
 static const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = {
 	"smcinvoke_shmbridge_postprocess", "smcinvoke_object_postprocess", "smcinvoke_adci_thread"};
-static struct Object adci_clientEnv = Object_NULL;
+static struct Object adci_rootEnv = Object_NULL;
+extern int get_root_obj(struct Object *rootObj);
 
 static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
 		size_t in_buf_len,
@@ -524,7 +525,7 @@ static void smcinvoke_shmbridge_post_process(void)
 			do {
 				ret = qtee_shmbridge_deregister(handle);
 				if (unlikely(ret)) {
-					pr_err("SHM failed: ret:%d ptr:0x%x h:%#llx\n",
+					pr_err_ratelimited("SHM failed: ret:%d ptr:0x%x h:%#llx\n",
 							ret,
 							dmabuf_to_free,
 							handle);
@@ -538,17 +539,45 @@ static void smcinvoke_shmbridge_post_process(void)
 	} while (1);
 }
 
-static int smcinvoke_object_post_process(void)
+static int smcinvoke_release_tz_object(struct qtee_shm *in_shm, struct qtee_shm *out_shm,
+		uint32_t tzhandle, uint32_t context_type)
 {
-	struct smcinvoke_object_release_pending_list *entry = NULL;
-	struct list_head *pos;
 	int ret = 0;
 	bool release_handles;
-	uint32_t context_type;
 	uint8_t *in_buf = NULL;
-	uint8_t *out_buf = NULL;
-	struct smcinvoke_cmd_req req = {0};
+	uint8_t *out_buf = NULL;
 	struct smcinvoke_msg_hdr hdr = {0};
+	struct smcinvoke_cmd_req req = {0};
+
+	in_buf = in_shm->vaddr;
+	out_buf = out_shm->vaddr;
+	hdr.tzhandle = tzhandle;
+	hdr.op = OBJECT_OP_RELEASE;
+	hdr.counts = 0;
+	*(struct smcinvoke_msg_hdr *)in_buf = hdr;
+
+	ret = prepare_send_scm_msg(in_buf, in_shm->paddr,
+			SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm->paddr,
+			SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL,
+			&release_handles, context_type, in_shm, out_shm);
+	process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
+	if (ret) {
+		pr_err_ratelimited("Failed to release object(0x%x), ret:%d\n",
+				hdr.tzhandle, ret);
+	} else {
+		pr_debug("Released object(0x%x) successfully.\n",
+				hdr.tzhandle);
+	}
+
+	return ret;
+}
+
+
+static int smcinvoke_object_post_process(void)
+{
+	struct smcinvoke_object_release_pending_list *entry = NULL;
+	struct list_head *pos;
+	int ret = 0;
 	struct qtee_shm in_shm = {0}, out_shm = {0};
 
 	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
@@ -573,37 +602,19 @@ static int smcinvoke_object_post_process(void)
 		}
 		pos = g_object_postprocess.next;
 		entry = list_entry(pos, struct smcinvoke_object_release_pending_list, list);
-		if (entry) {
-			in_buf = in_shm.vaddr;
-			out_buf = out_shm.vaddr;
-			hdr.tzhandle = entry->data.tzhandle;
-			hdr.op = OBJECT_OP_RELEASE;
-			hdr.counts = 0;
-			*(struct smcinvoke_msg_hdr *)in_buf = hdr;
-			context_type = entry->data.context_type;
-		} else {
-			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
-		}
+
 		list_del(pos);
-		kfree_sensitive(entry);
 		mutex_unlock(&object_postprocess_lock);
 
 		if (entry) {
 			do {
-				ret = prepare_send_scm_msg(in_buf, in_shm.paddr,
-					SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr,
-					SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL,
-					&release_handles, context_type, &in_shm, &out_shm);
-				process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
-				if (ret) {
-					pr_err("Failed to release object(0x%x), ret:%d\n",
-								hdr.tzhandle, ret);
-				} else {
-					pr_debug("Released object(0x%x) successfully.\n",
-									hdr.tzhandle);
-				}
+				ret = smcinvoke_release_tz_object(&in_shm, &out_shm,
+					       	entry->data.tzhandle,  entry->data.context_type);
 			} while (-EBUSY == ret);
+		} else {
+			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
 		}
+		kfree_sensitive(entry);
 	} while (1);
 
 out:
@@ -619,20 +630,20 @@ static void smcinvoke_start_adci_thread(void)
 	int32_t  ret = OBJECT_ERROR;
 	int retry_count = 0;
 
-	ret = get_client_env_object(&adci_clientEnv);
+	ret = get_root_obj(&adci_rootEnv);
 	if (ret) {
-		pr_err("failed to get clientEnv for ADCI invoke thread. ret = %d\n", ret);
+		pr_err("failed to get rootEnv for ADCI invoke thread. ret = %d\n", ret);
 		/* Marking it Object_NULL in case of failure scenario in order to avoid
-		 * undefined behavior while releasing garbage adci_clientEnv object.
-		 */
-		adci_clientEnv = Object_NULL;
+		 * undefined behavior while releasing garbage adci_rootEnv object. */
+		adci_rootEnv = Object_NULL;
 		goto out;
 	}
 	/* Invoke call to QTEE which should never return if ADCI is supported */
+	pr_debug("Invoking adciAccept method in QTEE\n");
 	do {
-		ret = IClientEnv_adciAccept(adci_clientEnv);
+		ret = IClientEnv_adciAccept(adci_rootEnv);
 		if (ret == OBJECT_ERROR_BUSY) {
-			pr_err("Secure side is busy,will retry after 5 ms, retry_count = %d",retry_count);
+			pr_err_ratelimited("Secure side is busy,will retry after 5 ms, retry_count = %d\n",retry_count);
 			msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
 		}
 	} while ((ret == OBJECT_ERROR_BUSY) && (retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
@@ -644,7 +655,7 @@ static void smcinvoke_start_adci_thread(void)
 out:
 	/* Control should reach to this point only if ADCI feature is not supported by QTEE
 	  (or) ADCI thread held in QTEE is released. */
-	Object_ASSIGN_NULL(adci_clientEnv);
+	Object_ASSIGN_NULL(adci_rootEnv);
 }
 
 static void __wakeup_postprocess_kthread(struct smcinvoke_worker_thread *smcinvoke)
@@ -748,18 +759,19 @@ static void smcinvoke_destroy_kthreads(void)
 	int32_t  ret = OBJECT_ERROR;
 	int retry_count = 0;
 
-	if(!Object_isNull(adci_clientEnv)) {
+	if (!Object_isNull(adci_rootEnv)) {
+		pr_debug("Invoking adciShutdown method in QTEE\n");
 		do {
-			ret = IClientEnv_adciShutdown(adci_clientEnv);
+			ret = IClientEnv_adciShutdown(adci_rootEnv);
 			if (ret == OBJECT_ERROR_BUSY) {
-				pr_err("Secure side is busy,will retry after 5 ms, retry_count = %d",retry_count);
+				pr_err_ratelimited("Secure side is busy,will retry after 5 ms, retry_count = %d\n",retry_count);
 				msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
 			}
 		} while ((ret == OBJECT_ERROR_BUSY) && (retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
-		if(OBJECT_isERROR(ret)) {
+		if (OBJECT_isERROR(ret)) {
 			pr_err("adciShutdown in QTEE failed with error = %d\n", ret);
 		}
-		Object_ASSIGN_NULL(adci_clientEnv);
+		Object_ASSIGN_NULL(adci_rootEnv);
 	}
 
 	for (i = 0; i < MAX_THREAD_NUMBER; i++) {
@@ -1913,7 +1925,7 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
 					&response_type, &data, in_shm, out_shm);
 
 			if (ret == -EBUSY) {
-				pr_err("Secure side is busy,will retry after 30 ms, retry_count = %d",retry_count);
+				pr_err_ratelimited("Secure side is busy,will retry after 30 ms, retry_count = %d\n",retry_count);
 				msleep(SMCINVOKE_SCM_EBUSY_WAIT_MS);
 			}
 
@@ -2545,7 +2557,7 @@ start_waiting_for_requests:
 			mutex_lock(&g_smcinvoke_lock);
 
 			if(freezing(current)) {
-				pr_err("Server id :%d interrupted probaby due to suspend, pid:%d",
+				pr_err_ratelimited("Server id :%d interrupted probably due to suspend, pid:%d\n",
 					server_info->server_id, current->pid);
 				/*
 				 * Each accept thread is identified by bits ranging from
@@ -2559,7 +2571,7 @@ start_waiting_for_requests:
 						SET_BIT(server_info->is_server_suspended,
 							(current->pid)%DEFAULT_CB_OBJ_THREAD_CNT);
 			} else {
-				pr_err("Setting pid:%d, server id : %d state to defunct",
+				pr_err_ratelimited("Setting pid:%d, server id : %d state to defunct\n",
 						current->pid, server_info->server_id);
 						server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT;
 			}
@@ -3008,6 +3020,7 @@ int smcinvoke_release_filp(struct file *filp)
 	struct smcinvoke_file_data *file_data = filp->private_data;
 	uint32_t tzhandle = 0;
 	struct smcinvoke_object_release_pending_list *entry = NULL;
+	struct qtee_shm in_shm = {0}, out_shm = {0};
 
 	trace_smcinvoke_release_filp(current->files, filp,
 			file_count(filp), file_data->context_type);
@@ -3019,29 +3032,55 @@ int smcinvoke_release_filp(struct file *filp)
 
 	tzhandle = file_data->tzhandle;
 	/* Root object is special in sense it is indestructible */
-	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
+	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ) {
+		if (!tzhandle)
+			pr_err("tzhandle not valid in object release\n");
 		goto out;
+	}
 
-	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		ret = -ENOMEM;
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
+	if (ret) {
+		pr_err("shmbridge alloc failed for in msg in object release"
+				"with ret %d\n", ret);
+		goto out;
+	}
+
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
+	if (ret) {
+		pr_err("shmbridge alloc failed for out msg in object release"
+				"with ret:%d\n", ret);
 		goto out;
 	}
 
-	entry->data.tzhandle = tzhandle;
-	entry->data.context_type = file_data->context_type;
-	mutex_lock(&object_postprocess_lock);
-	list_add_tail(&entry->list, &g_object_postprocess);
-	mutex_unlock(&object_postprocess_lock);
-	pr_debug("Object release list: added a handle:0x%lx\n", tzhandle);
-	__wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]);
+	ret = smcinvoke_release_tz_object(&in_shm, &out_shm,
+		tzhandle, file_data->context_type);
+
+	if (-EBUSY == ret) {
+		pr_debug("failed to release handle in sync adding to list\n");
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = 0;
+		entry->data.tzhandle = tzhandle;
+		entry->data.context_type = file_data->context_type;
+		mutex_lock(&object_postprocess_lock);
+		list_add_tail(&entry->list, &g_object_postprocess);
+		mutex_unlock(&object_postprocess_lock);
+		pr_debug("Object release list: added a handle:0x%lx\n", tzhandle);
+		__wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]);
+	}
 
 out:
+	qtee_shmbridge_free_shm(&in_shm);
+	qtee_shmbridge_free_shm(&out_shm);
 	kfree(filp->private_data);
 	filp->private_data = NULL;
 
+	if (ret != 0)
+		pr_err ("Object release failed with ret %d\n", ret);
 	return ret;
-
 }
 
 int smcinvoke_release_from_kernel_client(int fd)
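
Net effect of the changes above: smcinvoke_release_filp() now releases the TZ object synchronously and only defers the handle to the postprocess worker when QTEE returns -EBUSY. A hedged sketch of that flow (the helper name is illustrative; smcinvoke_release_tz_object() and struct qtee_shm come from the code above):

static int example_release_or_defer(uint32_t tzhandle, uint32_t context_type,
				    struct qtee_shm *in_shm,
				    struct qtee_shm *out_shm)
{
	int ret = smcinvoke_release_tz_object(in_shm, out_shm,
					      tzhandle, context_type);

	if (ret == -EBUSY) {
		/* QTEE is busy: queue the handle on g_object_postprocess and
		 * let the object worker thread retry the release later.
		 */
		ret = 0;
	}
	return ret;
}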

+ 1 - 1
smcinvoke/smcinvoke_kernel.c

@@ -272,7 +272,7 @@ exit:
 	return ret | req.result;
 }
 
-static int get_root_obj(struct Object *rootObj)
+int get_root_obj(struct Object *rootObj)
 {
 	int ret = 0;
 	int root_fd = -1;

+ 8 - 2
smcinvoke/trace_smcinvoke.h

@@ -487,10 +487,16 @@ TRACE_EVENT(smcinvoke_release,
 );
 
 #endif /* _TRACE_SMCINVOKE_H */
+/*
+ * Path must be relative to the location of the 'define_trace.h' header in the kernel.
+ * Define the path here if it is not already defined in the Bazel file.
+ */
+#ifndef SMCINVOKE_TRACE_INCLUDE_PATH
+#define SMCINVOKE_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/securemsm-kernel/smcinvoke
+#endif
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/securemsm-kernel/smcinvoke
-
+#define TRACE_INCLUDE_PATH SMCINVOKE_TRACE_INCLUDE_PATH
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace_smcinvoke
 

+ 18 - 0
smmu-proxy/linux/qti-smmu-proxy.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __QTI_SMMU_PROXY_H_
+#define __QTI_SMMU_PROXY_H_
+
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/align.h>
+
+#include <smmu-proxy/uapi/linux/qti-smmu-proxy.h>
+
+#define SMMU_PROXY_MEM_ALIGNMENT (1 << 21)
+
+int smmu_proxy_get_csf_version(struct csf_version *csf_version);
+
+#endif /* __QTI_SMMU_PROXY_H_ */
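
This header exports smmu_proxy_get_csf_version() to other kernel modules. A hedged sketch of a caller (the function name is illustrative, and the include path assumes the directories exported above via LOCAL_EXPORT_KO_INCLUDE_DIRS):

#include <linux/printk.h>
#include <smmu-proxy/linux/qti-smmu-proxy.h>

static int example_check_csf(void)
{
	struct csf_version ver;
	int ret = smmu_proxy_get_csf_version(&ver);

	if (ret)
		return ret;

	/* e.g. decide whether a buffer must honour SMMU_PROXY_MEM_ALIGNMENT */
	pr_info("CSF version %u.%u.%u\n", ver.arch_ver, ver.max_ver, ver.min_ver);
	return 0;
}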

+ 99 - 0
smmu-proxy/qti-smmu-proxy-common.c

@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include "qti-smmu-proxy-common.h"
+#include "../include/linux/smcinvoke.h"
+#include "../include/linux/ITrustedCameraDriver.h"
+#include "../include/linux/CTrustedCameraDriver.h"
+#include "../include/linux/IClientEnv.h"
+
+#define SMMU_PROXY_MAX_DEVS 1
+static dev_t smmu_proxy_dev_no;
+static struct class *smmu_proxy_class;
+static struct cdev smmu_proxy_char_dev;
+
+static struct csf_version cached_csf_version;
+
+int smmu_proxy_get_csf_version(struct csf_version *csf_version)
+{
+	int ret;
+	struct Object client_env = {0};
+	struct Object sc_object;
+
+	if (cached_csf_version.arch_ver != 0) {
+		csf_version->arch_ver = cached_csf_version.arch_ver;
+		csf_version->max_ver = cached_csf_version.max_ver;
+		csf_version->min_ver = cached_csf_version.min_ver;
+
+		return 0;
+	}
+
+	ret = get_client_env_object(&client_env);
+	if (ret) {
+		pr_err("%s: Failed to get env object rc: %d\n", __func__,
+		       ret);
+		return ret;
+	}
+
+	ret = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object);
+	if (ret) {
+		pr_err("%s: Failed to get seccam object rc: %d\n", __func__,
+		       ret);
+		return ret;
+	}
+
+	ret = ITrustedCameraDriver_getVersion(sc_object, &csf_version->arch_ver,
+					      &csf_version->max_ver,
+					      &csf_version->min_ver);
+
+	Object_release(sc_object);
+	Object_release(client_env);
+
+	return ret;
+}
+EXPORT_SYMBOL(smmu_proxy_get_csf_version);
+
+int smmu_proxy_create_dev(const struct file_operations *fops)
+{
+	int ret;
+	struct device *class_dev;
+
+	ret = alloc_chrdev_region(&smmu_proxy_dev_no, 0, SMMU_PROXY_MAX_DEVS,
+				  "qti-smmu-proxy");
+	if (ret < 0)
+		return ret;
+
+	smmu_proxy_class = class_create(THIS_MODULE, "qti-smmu-proxy");
+	if (IS_ERR(smmu_proxy_class)) {
+		ret = PTR_ERR(smmu_proxy_class);
+		goto err_class_create;
+	}
+
+	cdev_init(&smmu_proxy_char_dev, fops);
+	ret = cdev_add(&smmu_proxy_char_dev, smmu_proxy_dev_no,
+		       SMMU_PROXY_MAX_DEVS);
+	if (ret < 0)
+		goto err_cdev_add;
+
+	class_dev = device_create(smmu_proxy_class, NULL, smmu_proxy_dev_no, NULL,
+				  "qti-smmu-proxy");
+	if (IS_ERR(class_dev)) {
+		ret = PTR_ERR(class_dev);
+		goto err_dev_create;
+	}
+
+	return 0;
+
+err_dev_create:
+	cdev_del(&smmu_proxy_char_dev);
+err_cdev_add:
+	class_destroy(smmu_proxy_class);
+err_class_create:
+	unregister_chrdev_region(smmu_proxy_dev_no, SMMU_PROXY_MAX_DEVS);
+
+	return ret;
+}
+

+ 30 - 0
smmu-proxy/qti-smmu-proxy-common.h

@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QTI_SMMU_PROXY_COMMON_H_
+#define __QTI_SMMU_PROXY_COMMON_H_
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/dma-buf.h>
+
+#include <linux/mem-buf.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/gunyah/gh_msgq.h>
+#include "qti-smmu-proxy-msgq.h"
+#include "linux/qti-smmu-proxy.h"
+
+union smmu_proxy_ioctl_arg {
+	struct csf_version csf_version;
+	struct smmu_proxy_acl_ctl acl_ctl;
+	struct smmu_proxy_wipe_buf_ctl wipe_buf_ctl;
+	struct smmu_proxy_get_dma_buf_ctl get_dma_buf_ctl;
+};
+
+int smmu_proxy_create_dev(const struct file_operations *fops);
+
+#endif /* __QTI_SMMU_PROXY_COMMON_H_ */

+ 107 - 0
smmu-proxy/qti-smmu-proxy-msgq.h

@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef SMMU_PROXY_MSGQ_H
+#define SMMU_PROXY_MSGQ_H
+
+#include <linux/gunyah/gh_rm_drv.h>
+
+
+/**
+ * enum smmu_proxy_msg_type: Message types used by the SMMU proxy driver for
+ * communication.
+ * @SMMU_PROXY_MAP: The message is a request to map memory into the VM's
+ * SMMU.
+ * @SMMU_PROXY_MAP_RESP: The message is a response from a remote VM to a
+ * mapping request issued by the receiving VM
+ * @SMMU_PROXY_UNMAP: The message is a request to unmap some previously
+ * SMMU-mapped memory from the VM
+ * @SMMU_PROXY_UNMAP_RESP: The message is a response from a remote VM to an
+ * unmapping request issued by the receiving VM
+ * @SMMU_PROXY_ERR_RESP: The message is a response from a remote VM to give
+ * a generic error response for a prior message sent to the remote VM
+ */
+enum smmu_proxy_msg_type {
+	SMMU_PROXY_MAP,
+	SMMU_PROXY_MAP_RESP,
+	SMMU_PROXY_UNMAP,
+	SMMU_PROXY_UNMAP_RESP,
+	SMMU_PROXY_ERR_RESP,
+	SMMU_PROXY_MSG_MAX,
+};
+
+/**
+ * struct smmu_proxy_msg_hdr: The header for SMMU proxy messages
+ * @msg_type: The type of message.
+ * @msg_size: The size of message.
+ */
+struct smmu_proxy_msg_hdr {
+	u32 msg_type;
+	u32 msg_size;
+} __packed;
+
+/**
+ * struct smmu_proxy_resp_hdr: The header for responses to SMMU proxy messages
+ * @msg_type: The type of message.
+ * @msg_size: The size of message.
+ * @ret: Return code from remote VM
+ */
+struct smmu_proxy_resp_hdr {
+	u32 msg_type;
+	u32 msg_size;
+	s32 ret;
+} __packed;
+
+/**
+ * struct smmu_proxy_map_req: The message format for an SMMU mapping request from
+ * another VM.
+ * @hdr: Message header
+ * @hdl: The memparcel handle associated with the memory to be mapped in the SMMU
+ * of the relevant VM
+ * @cb_id: Context bank ID that we will map the memory associated with @hdl to
+ * @acl_desc: A GH ACL descriptor that describes the VMIDs that will be
+ * accessing the memory, as well as what permissions each VMID will have.
+ */
+struct smmu_proxy_map_req {
+	struct smmu_proxy_msg_hdr hdr;
+	u32 hdl;
+	u32 cb_id;
+	struct gh_acl_desc acl_desc;
+} __packed;
+
+/**
+ * struct smmu_proxy_map_resp: The message format for an SMMU mapping
+ * request response.
+ * @hdr: Response header
+ * @iova: IOVA of mapped memory
+ * @mapping_len: Length of the IOMMU IOVA mapping
+ */
+struct smmu_proxy_map_resp {
+	struct smmu_proxy_resp_hdr hdr;
+	u64 iova;
+	u64 mapping_len;
+} __packed;
+
+/**
+ * struct smmu_proxy_unmap_req: The message format for an SMMU unmapping request from
+ * another VM.
+ * @hdr: Message header
+ * @hdl: The memparcel handle associated with the memory to be mapped in the SMMU
+ * of the relevant VM
+ */
+struct smmu_proxy_unmap_req {
+	struct smmu_proxy_msg_hdr hdr;
+	u32 hdl;
+} __packed;
+
+/**
+ * struct smmu_proxy_unmap_resp: The message format for an SMMU unmapping
+ * request response.
+ * @hdr: Response header
+ */
+struct smmu_proxy_unmap_resp {
+	struct smmu_proxy_resp_hdr hdr;
+} __packed;
+
+#endif /* SMMU_PROXY_MSGQ_H */
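
Because struct smmu_proxy_map_req ends with gh_acl_desc's flexible acl_entries[] array, the wire size of a map request is computed with offsetof() rather than sizeof(); the sender and receiver files below size and validate messages exactly this way. A hedged helper sketch:

#include <linux/stddef.h>

/* size in bytes of a map request carrying n_acl_entries ACL entries */
static size_t example_map_req_size(u32 n_acl_entries)
{
	return offsetof(struct smmu_proxy_map_req,
			acl_desc.acl_entries[n_acl_entries]);
}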

+ 323 - 0
smmu-proxy/qti-smmu-proxy-pvm.c

@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "qti-smmu-proxy-common.h"
+
+#include <linux/qti-smmu-proxy-callbacks.h>
+#include <linux/qcom-dma-mapping.h>
+#include <linux/of.h>
+
+static void *msgq_hdl;
+
+DEFINE_MUTEX(sender_mutex);
+
+static const struct file_operations smmu_proxy_dev_fops;
+
+int smmu_proxy_unmap(void *data)
+{
+	struct dma_buf *dmabuf;
+	void *buf;
+	size_t size;
+	int ret;
+	struct smmu_proxy_unmap_req *req;
+	struct smmu_proxy_unmap_resp *resp;
+
+	mutex_lock(&sender_mutex);
+	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		pr_err("%s: Failed to allocate memory!\n", __func__);
+		goto out;
+	}
+
+	req = buf;
+
+	dmabuf = data;
+	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
+	if (ret) {
+		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	req->hdr.msg_type = SMMU_PROXY_UNMAP;
+	req->hdr.msg_size = sizeof(*req);
+
+	ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
+	if (ret < 0) {
+		pr_err("%s: failed to send message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	/*
+	 * No need to validate size -  gh_msgq_recv() ensures that sizeof(*resp) <
+	 * GH_MSGQ_MAX_MSG_SIZE_BYTES
+	 */
+	ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
+	if (ret < 0) {
+		pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	resp = buf;
+	if (resp->hdr.ret) {
+		ret = resp->hdr.ret;
+		pr_err("%s: Unmap call failed on remote VM, rc: %d\n", __func__,
+		       resp->hdr.ret);
+	}
+
+free_buf:
+	kfree(buf);
+out:
+	mutex_unlock(&sender_mutex);
+
+	return ret;
+}
+
+int smmu_proxy_map(struct device *client_dev, struct sg_table *proxy_iova,
+		   struct dma_buf *dmabuf)
+{
+	void *buf;
+	size_t size;
+	int ret = 0;
+	int n_acl_entries, i;
+	int vmids[2] = { VMID_TVM, VMID_OEMVM };
+	int perms[2] = { PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
+	struct csf_version csf_version;
+	struct mem_buf_lend_kernel_arg arg = {0};
+	struct smmu_proxy_map_req *req;
+	struct smmu_proxy_map_resp *resp;
+
+	ret = smmu_proxy_get_csf_version(&csf_version);
+	if (ret) {
+		return ret;
+	}
+
+	/*
+	 * We enter this function iff the CSF version is 2.5.* . If CSF 2.5.1
+	 * is in use, we set n_acl_entries to two, in order to assign this
+	 * memory to the TVM and OEM VM. If CSF 2.5.0 is in use, we just assign
+	 * it to the TVM.
+	 */
+	n_acl_entries = csf_version.min_ver == 1 ? 2 : 1;
+
+	mutex_lock(&sender_mutex);
+	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (mem_buf_dma_buf_exclusive_owner(dmabuf)) {
+		arg.vmids = vmids;
+		arg.perms = perms;
+		arg.nr_acl_entries = n_acl_entries;
+
+		ret = mem_buf_lend(dmabuf, &arg);
+		if (ret) {
+			pr_err("%s: Failed to lend buf rc: %d\n", __func__, ret);
+			goto free_buf;
+		}
+	}
+
+	/* Prepare the message */
+	req = buf;
+	req->acl_desc.n_acl_entries = n_acl_entries;
+	for (i = 0; i < n_acl_entries; i++) {
+		req->acl_desc.acl_entries[i].vmid = vmids[i];
+		req->acl_desc.acl_entries[i].perms = perms[i];
+	}
+
+	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
+	if (ret) {
+		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	ret = of_property_read_u32(client_dev->of_node,
+				   "qti,smmu-proxy-cb-id",
+				   &req->cb_id);
+	if (ret) {
+		dev_err(client_dev, "%s: Err reading 'qti,smmu-proxy-cb-id' rc: %d\n",
+			__func__, ret);
+		goto free_buf;
+	}
+
+	req->hdr.msg_type = SMMU_PROXY_MAP;
+	req->hdr.msg_size = offsetof(struct smmu_proxy_map_req,
+				acl_desc.acl_entries[n_acl_entries]);
+
+	ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
+	if (ret < 0) {
+		pr_err("%s: failed to send message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	/*
+	 * No need to validate size -  gh_msgq_recv() ensures that sizeof(*resp) <
+	 * GH_MSGQ_MAX_MSG_SIZE_BYTES
+	 */
+	ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
+	if (ret < 0) {
+		pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	resp = buf;
+
+	if (resp->hdr.ret) {
+		proxy_iova = ERR_PTR(resp->hdr.ret);
+		pr_err_ratelimited("%s: Map call failed on remote VM, rc: %d\n", __func__,
+				   resp->hdr.ret);
+		goto free_buf;
+	}
+
+	ret = mem_buf_dma_buf_set_destructor(dmabuf, smmu_proxy_unmap, dmabuf);
+	if (ret) {
+		pr_err_ratelimited("%s: Failed to set vmperm destructor, rc: %d\n",
+				   __func__, ret);
+		goto free_buf;
+	}
+
+	sg_dma_address(proxy_iova->sgl) = resp->iova;
+	sg_dma_len(proxy_iova->sgl) = resp->mapping_len;
+	/*
+	 * We set the number of entries to one here, as we only allow the mapping to go
+	 * through on the TVM if the sg_table returned by dma_buf_map_attachment has one
+	 * entry.
+	 */
+	proxy_iova->nents = 1;
+
+free_buf:
+	kfree(buf);
+out:
+	mutex_unlock(&sender_mutex);
+
+	return ret;
+}
+
+void smmu_proxy_unmap_nop(struct device *client_dev, struct sg_table *table,
+			  struct dma_buf *dmabuf)
+{
+
+}
+
+
+static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	unsigned int dir = _IOC_DIR(cmd);
+	union smmu_proxy_ioctl_arg ioctl_arg;
+	int ret;
+
+	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
+		return -EINVAL;
+
+	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+	if (!(dir & _IOC_WRITE))
+		memset(&ioctl_arg, 0, sizeof(ioctl_arg));
+
+	switch (cmd) {
+	case QTI_SMMU_PROXY_GET_VERSION_IOCTL:
+	{
+		struct csf_version *csf_version =
+			&ioctl_arg.csf_version;
+
+		ret = smmu_proxy_get_csf_version(csf_version);
+		if(ret)
+			return ret;
+
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &ioctl_arg,
+				 _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static const struct file_operations smmu_proxy_dev_fops = {
+	.unlocked_ioctl = smmu_proxy_dev_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
+};
+
+static int sender_probe_handler(struct platform_device *pdev)
+{
+	int ret;
+	struct csf_version csf_version;
+
+	msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
+	if (IS_ERR(msgq_hdl)) {
+		ret = PTR_ERR(msgq_hdl);
+		pr_err("%s: Queue registration failed rc: %d!\n", __func__, PTR_ERR(msgq_hdl));
+		return ret;
+	}
+
+	ret = smmu_proxy_get_csf_version(&csf_version);
+	if (ret) {
+		pr_err("%s: Failed to get CSF version rc: %d\n", __func__, ret);
+		goto free_msgq;
+	}
+
+	if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
+		ret = qti_smmu_proxy_register_callbacks(NULL, NULL);
+	} else if (csf_version.arch_ver == 2 && csf_version.max_ver == 5) {
+		ret = qti_smmu_proxy_register_callbacks(smmu_proxy_map, smmu_proxy_unmap_nop);
+	} else {
+		pr_err("%s: Invalid CSF version: %d.%d\n", __func__, csf_version.arch_ver,
+			csf_version.max_ver);
+		goto free_msgq;
+	}
+
+	if (ret) {
+		pr_err("%s: Failed to set SMMU proxy callbacks rc: %d\n", __func__, ret);
+		goto free_msgq;
+	}
+
+	ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
+	if (ret) {
+		pr_err("%s: Failed to create character device rc: %d\n", __func__,
+		       ret);
+		goto set_callbacks_null;
+	}
+
+	return 0;
+
+set_callbacks_null:
+	qti_smmu_proxy_register_callbacks(NULL, NULL);
+free_msgq:
+	gh_msgq_unregister(msgq_hdl);
+	return ret;
+}
+
+static const struct of_device_id smmu_proxy_match_table[] = {
+	{.compatible = "smmu-proxy-sender"},
+	{},
+};
+
+static struct platform_driver smmu_proxy_driver = {
+	.probe = sender_probe_handler,
+	.driver = {
+		.name = "qti-smmu-proxy",
+		.of_match_table = smmu_proxy_match_table,
+	},
+};
+
+int __init init_smmu_proxy_driver(void)
+{
+	return platform_driver_register(&smmu_proxy_driver);
+}
+module_init(init_smmu_proxy_driver);
+
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL v2");

+ 738 - 0
smmu-proxy/qti-smmu-proxy-tvm.c

@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/kthread.h>
+
+#include <linux/qcom-iommu-util.h>
+#include <dt-bindings/arm/msm/qti-smmu-proxy-dt-ids.h>
+#include "qti-smmu-proxy-common.h"
+
+#define RECEIVER_COMPAT_STR "smmu-proxy-receiver"
+#define CB_COMPAT_STR "smmu-proxy-cb"
+
+static void *msgq_hdl;
+
+struct smmu_proxy_buffer_cb_info {
+	bool mapped;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sg_table;
+};
+
+struct smmu_proxy_buffer_state {
+	bool locked;
+	struct smmu_proxy_buffer_cb_info cb_info[QTI_SMMU_PROXY_CB_IDS_LEN];
+	struct dma_buf *dmabuf;
+};
+
+static DEFINE_MUTEX(buffer_state_lock);
+static DEFINE_XARRAY(buffer_state_arr);
+
+static unsigned int cb_map_counts[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
+struct device *cb_devices[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
+
+struct task_struct *receiver_msgq_handler_thread;
+
+static int iommu_unmap_and_relinquish(u32 hdl)
+{
+	int cb_id, ret = 0;
+	struct smmu_proxy_buffer_state *buf_state;
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, hdl);
+	if (!buf_state) {
+		pr_err("%s: handle 0x%llx unknown to proxy driver!\n", __func__, hdl);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (buf_state->locked) {
+		pr_err("%s: handle 0x%llx is locked!\n", __func__, hdl);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (cb_id = 0; cb_id < QTI_SMMU_PROXY_CB_IDS_LEN; cb_id++) {
+		if (buf_state->cb_info[cb_id].mapped) {
+			dma_buf_unmap_attachment(buf_state->cb_info[cb_id].attachment,
+						 buf_state->cb_info[cb_id].sg_table,
+						 DMA_BIDIRECTIONAL);
+			dma_buf_detach(buf_state->dmabuf,
+				       buf_state->cb_info[cb_id].attachment);
+			buf_state->cb_info[cb_id].mapped = false;
+
+			/* If nothing left is mapped for this CB, unprogram its SMR */
+			cb_map_counts[cb_id]--;
+			if (!cb_map_counts[cb_id]) {
+				ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
+				if (ret) {
+					pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
+					       __func__, cb_id, ret);
+					break;
+				}
+			}
+		}
+	}
+
+	dma_buf_put(buf_state->dmabuf);
+	flush_delayed_fput();
+
+	xa_erase(&buffer_state_arr, hdl);
+	kfree(buf_state);
+out:
+	mutex_unlock(&buffer_state_lock);
+
+	return ret;
+}
+
+static int process_unmap_request(struct smmu_proxy_unmap_req *req, size_t size)
+{
+	struct smmu_proxy_unmap_resp *resp;
+	int ret = 0;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		pr_err("%s: Failed to allocate memory for response\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = iommu_unmap_and_relinquish(req->hdl);
+
+	resp->hdr.msg_type = SMMU_PROXY_UNMAP_RESP;
+	resp->hdr.msg_size = sizeof(*resp);
+	resp->hdr.ret = ret;
+
+	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
+	if (ret < 0)
+		pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
+	else
+		pr_debug("%s: response to mapping request sent\n", __func__);
+
+	kfree(resp);
+
+	return ret;
+}
+
+static
+inline
+struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retrieve_arg,
+					u32 cb_id)
+{
+	int ret;
+	struct dma_buf *dmabuf;
+	bool new_buf = false;
+	struct smmu_proxy_buffer_state *buf_state;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *table;
+
+	if (cb_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
+		pr_err("%s: CB ID %d too large\n", __func__, cb_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!cb_devices[cb_id]) {
+		pr_err("%s: CB of ID %d not defined\n", __func__, cb_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, retrieve_arg->memparcel_hdl);
+	if (buf_state) {
+		if (buf_state->locked) {
+			pr_err("%s: handle 0x%llx is locked!\n", __func__,
+			       retrieve_arg->memparcel_hdl);
+			ret = -EINVAL;
+			goto unlock_err;
+		}
+		if (buf_state->cb_info[cb_id].mapped) {
+			table = buf_state->cb_info[cb_id].sg_table;
+			goto unlock;
+		}
+
+		dmabuf = buf_state->dmabuf;
+	} else {
+		new_buf = true;
+		dmabuf = mem_buf_retrieve(retrieve_arg);
+		if (IS_ERR(dmabuf)) {
+			ret = PTR_ERR(dmabuf);
+			pr_err("%s: Failed to retrieve DMA-BUF rc: %d\n", __func__, ret);
+			goto unlock_err;
+		}
+
+		buf_state = kzalloc(sizeof(*buf_state), GFP_KERNEL);
+		if (!buf_state) {
+			pr_err("%s: Unable to allocate memory for buf_state\n",
+			       __func__);
+			ret = -ENOMEM;
+			goto free_buf;
+		}
+
+		buf_state->dmabuf = dmabuf;
+	}
+
+	attachment = dma_buf_attach(dmabuf, cb_devices[cb_id]);
+	if (IS_ERR(attachment)) {
+		ret = PTR_ERR(attachment);
+		pr_err("%s: Failed to attach rc: %d\n", __func__, ret);
+		goto free_buf_state;
+	}
+
+	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+	if (IS_ERR(table)) {
+		ret = PTR_ERR(table);
+		pr_err("%s: Failed to map rc: %d\n", __func__, ret);
+		goto detach;
+	}
+
+	if (table->nents != 1) {
+		ret = -EINVAL;
+		pr_err("%s: Buffer not mapped as one segment!\n", __func__);
+		goto unmap;
+	}
+
+	buf_state->cb_info[cb_id].mapped = true;
+	buf_state->cb_info[cb_id].attachment = attachment;
+	buf_state->cb_info[cb_id].sg_table = table;
+
+	if (!cb_map_counts[cb_id]) {
+		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_ACQUIRE);
+		if (ret) {
+			pr_err("%s: Failed to program SMRs for cb_id %d rc: %d\n", __func__,
+			       cb_id, ret);
+			goto unmap;
+		}
+	}
+	cb_map_counts[cb_id]++;
+
+	ret = xa_err(xa_store(&buffer_state_arr, retrieve_arg->memparcel_hdl, buf_state,
+		     GFP_KERNEL));
+	if (ret < 0) {
+		pr_err("%s: Failed to store new buffer in xarray rc: %d\n", __func__,
+		       ret);
+		goto dec_cb_map_count;
+	}
+
+unlock:
+	mutex_unlock(&buffer_state_lock);
+
+	return table;
+
+dec_cb_map_count:
+	cb_map_counts[cb_id]--;
+	if (!cb_map_counts[cb_id]) {
+		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
+		if (ret)
+			pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
+			       __func__, cb_id, ret);
+	}
+unmap:
+	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
+detach:
+	dma_buf_detach(dmabuf, attachment);
+free_buf_state:
+	if (new_buf)
+		kfree(buf_state);
+free_buf:
+	if (new_buf)
+		dma_buf_put(dmabuf);
+unlock_err:
+	mutex_unlock(&buffer_state_lock);
+
+	return ERR_PTR(ret);
+}
+
+static int process_map_request(struct smmu_proxy_map_req *req, size_t size)
+{
+	struct smmu_proxy_map_resp *resp;
+	int ret = 0;
+	u32 n_acl_entries = req->acl_desc.n_acl_entries;
+	size_t map_req_len = offsetof(struct smmu_proxy_map_req,
+				      acl_desc.acl_entries[n_acl_entries]);
+	struct mem_buf_retrieve_kernel_arg retrieve_arg = {0};
+	int i;
+	struct sg_table *table;
+
+	/*
+	 * Last entry of smmu_proxy_map_req is an array of arbitrary length.
+	 * Validate that the number of entries fits within the buffer given
+	 * to us by the message queue.
+	 */
+	if (map_req_len > size) {
+		pr_err("%s: Reported size of smmu_proxy_map_request (%d bytes) greater than message size given by message queue (%d bytes)\n",
+		       __func__, map_req_len, size);
+		return -EINVAL;
+	}
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		pr_err("%s: Failed to allocate memory for response\n", __func__);
+		return -ENOMEM;
+	}
+
+	retrieve_arg.vmids = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.vmids), GFP_KERNEL);
+	if (!retrieve_arg.vmids) {
+		ret = -ENOMEM;
+		goto free_resp;
+	}
+
+	retrieve_arg.perms = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.perms), GFP_KERNEL);
+	if (!retrieve_arg.perms) {
+		ret = -ENOMEM;
+		goto free_vmids;
+	}
+
+	retrieve_arg.memparcel_hdl = req->hdl;
+	retrieve_arg.sender_vmid = VMID_HLOS;
+	retrieve_arg.nr_acl_entries = n_acl_entries;
+
+	for (i = 0; i < n_acl_entries; i++) {
+		retrieve_arg.vmids[i] = req->acl_desc.acl_entries[i].vmid;
+		retrieve_arg.perms[i] = req->acl_desc.acl_entries[i].perms;
+	}
+
+	table = retrieve_and_iommu_map(&retrieve_arg, req->cb_id);
+	if (IS_ERR(table)) {
+		ret = PTR_ERR(table);
+		goto free_perms;
+	}
+
+	resp->hdr.msg_type = SMMU_PROXY_MAP_RESP;
+	resp->hdr.msg_size = sizeof(*resp);
+	resp->hdr.ret = ret;
+	resp->iova = sg_dma_address(table->sgl);
+	resp->mapping_len = sg_dma_len(table->sgl);
+
+	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
+	if (ret < 0) {
+		pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
+		iommu_unmap_and_relinquish(req->hdl);
+	} else {
+		pr_debug("%s: response to mapping request sent\n", __func__);
+	}
+
+free_perms:
+	kfree(retrieve_arg.perms);
+free_vmids:
+	kfree(retrieve_arg.vmids);
+free_resp:
+	kfree(resp);
+
+	return ret;
+}
+
+static void smmu_proxy_process_msg(void *buf, size_t size)
+{
+	struct smmu_proxy_msg_hdr *msg_hdr = buf;
+	struct smmu_proxy_resp_hdr *resp;
+	int ret = -EINVAL;
+
+	pr_err("%s: smmu-proxy message received\n", __func__);
+	if (size < sizeof(*msg_hdr) || msg_hdr->msg_size != size) {
+		pr_err("%s: message received is not of a proper size: 0x%zx, 0x%x\n",
+		       __func__, size, msg_hdr->msg_size);
+		goto handle_err;
+	}
+
+	switch (msg_hdr->msg_type) {
+	case SMMU_PROXY_MAP:
+		ret = process_map_request(buf, size);
+		break;
+	case SMMU_PROXY_UNMAP:
+		ret = process_unmap_request(buf, size);
+		break;
+	default:
+		pr_err("%s: received message of unknown type: %d\n", __func__,
+		       msg_hdr->msg_type);
+	}
+
+	if (!ret)
+		return;
+
+handle_err:
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		pr_err("%s: Failed to allocate memory for response\n", __func__);
+		return;
+	}
+
+	resp->msg_type = SMMU_PROXY_ERR_RESP;
+	resp->msg_size = sizeof(*resp);
+	resp->ret = ret;
+
+	ret = gh_msgq_send(msgq_hdl, resp, resp->msg_size, 0);
+	if (ret < 0)
+		pr_err("%s: failed to send error response rc: %d\n", __func__, ret);
+	else
+		pr_debug("%s: response to mapping request sent\n", __func__);
+
+	kfree(resp);
+
+}
+
+static int receiver_msgq_handler(void *msgq_hdl)
+{
+	void *buf;
+	size_t size;
+	int ret;
+
+	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	while (!kthread_should_stop()) {
+		ret = gh_msgq_recv(msgq_hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
+		if (ret < 0) {
+			pr_err_ratelimited("%s failed to receive message rc: %d\n", __func__, ret);
+		} else {
+			smmu_proxy_process_msg(buf, size);
+		}
+	}
+
+	kfree(buf);
+
+	return 0;
+}
+
+static int smmu_proxy_ac_lock_toggle(int dma_buf_fd, bool lock)
+{
+	int ret = 0;
+	struct smmu_proxy_buffer_state *buf_state;
+	struct dma_buf *dmabuf;
+	u32 handle;
+
+	dmabuf = dma_buf_get(dma_buf_fd);
+	if (IS_ERR(dmabuf)) {
+		pr_err("%s: unable to get dma-buf from FD %d, rc: %d", __func__,
+		       dma_buf_fd, PTR_ERR(dmabuf));
+		return PTR_ERR(dmabuf);
+	}
+
+	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &handle);
+	if (ret) {
+		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, handle);
+	if (!buf_state) {
+		pr_err("%s: handle 0x%llx unknown to proxy driver!\n", __func__, handle);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (buf_state->locked == lock) {
+		pr_err("%s: handle 0x%llx already %s!\n", __func__, handle,
+		       lock ? "locked" : "unlocked");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	buf_state->locked = lock;
+out:
+	mutex_unlock(&buffer_state_lock);
+free_buf:
+	dma_buf_put(dmabuf);
+
+	return ret;
+}
+
+/*
+ * Iterate over all buffers mapped to context bank @context_bank_id, and zero
+ * out the buffers. If there is a single error for any buffer, we bail out with
+ * an error and disregard the rest of the buffers mapped to @context_bank_id.
+ */
+int smmu_proxy_clear_all_buffers(void __user *context_bank_id_array,
+				 __u32 num_cb_ids)
+{
+	unsigned long handle;
+	struct smmu_proxy_buffer_state *buf_state;
+	struct iosys_map vmap_struct = {0};
+	__u32 cb_ids[QTI_SMMU_PROXY_CB_IDS_LEN];
+	int i, ret = 0;
+	bool found_mapped_cb;
+
+	/* Checking this allows us to keep cb_id_arr fixed in length */
+	if (num_cb_ids > QTI_SMMU_PROXY_CB_IDS_LEN) {
+		pr_err("%s: Invalid number of CB IDs: %u\n", __func__, num_cb_ids);
+		return -EINVAL;
+	}
+
+	ret = copy_struct_from_user(&cb_ids, sizeof(cb_ids), context_bank_id_array,
+				    sizeof(cb_ids));
+	if (ret) {
+		pr_err("%s: Failed to get CB IDs from user space rc %d\n", __func__, ret);
+		return ret;
+	}
+
+	for (i = 0; i < num_cb_ids; i++) {
+		if (cb_ids[i] >= QTI_SMMU_PROXY_CB_IDS_LEN) {
+			pr_err("%s: Invalid CB ID of %u at pos %d\n", __func__, cb_ids[i], i);
+			return -EINVAL;
+		}
+	}
+
+	mutex_lock(&buffer_state_lock);
+	xa_for_each(&buffer_state_arr, handle, buf_state) {
+		found_mapped_cb = false;
+		for (i = 0; i < num_cb_ids; i++) {
+			if (buf_state->cb_info[cb_ids[i]].mapped) {
+				found_mapped_cb = true;
+				break;
+			}
+		}
+		if (!found_mapped_cb)
+			continue;
+
+		ret = dma_buf_vmap(buf_state->dmabuf, &vmap_struct);
+		if (ret) {
+			pr_err("%s: dma_buf_vmap() failed with %d\n", __func__, ret);
+			goto unlock;
+		}
+
+		/* Use DMA_TO_DEVICE since we are not reading anything */
+		ret = dma_buf_begin_cpu_access(buf_state->dmabuf, DMA_TO_DEVICE);
+		if (ret) {
+			pr_err("%s: dma_buf_begin_cpu_access() failed with %d\n", __func__, ret);
+			goto unmap;
+		}
+
+		memset(vmap_struct.vaddr, 0, buf_state->dmabuf->size);
+		ret = dma_buf_end_cpu_access(buf_state->dmabuf, DMA_TO_DEVICE);
+		if (ret)
+			pr_err("%s: dma_buf_end_cpu_access() failed with %d\n", __func__, ret);
+unmap:
+		dma_buf_vunmap(buf_state->dmabuf, &vmap_struct);
+		if (ret)
+			break;
+	}
+
+unlock:
+	mutex_unlock(&buffer_state_lock);
+	return ret;
+}
+
+static int smmu_proxy_get_dma_buf(struct smmu_proxy_get_dma_buf_ctl *get_dma_buf_ctl)
+{
+	struct smmu_proxy_buffer_state *buf_state;
+	int fd, ret = 0;
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, get_dma_buf_ctl->memparcel_hdl);
+	if (!buf_state) {
+		pr_err("%s: handle 0x%llx unknown to proxy driver!\n", __func__,
+		       get_dma_buf_ctl->memparcel_hdl);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	get_dma_buf(buf_state->dmabuf);
+	fd = dma_buf_fd(buf_state->dmabuf, O_RDWR | O_CLOEXEC);
+	if (fd < 0) {
+		ret = fd;
+		pr_err("%s: Failed to install FD for dma-buf rc: %d\n", __func__,
+		       ret);
+		dma_buf_put(buf_state->dmabuf);
+	} else {
+		get_dma_buf_ctl->dma_buf_fd = fd;
+	}
+out:
+	mutex_unlock(&buffer_state_lock);
+
+	return ret;
+}
+
+static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	unsigned int dir = _IOC_DIR(cmd);
+	union smmu_proxy_ioctl_arg ioctl_arg;
+	int ret;
+
+	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
+		return -EINVAL;
+
+	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+	if (!(dir & _IOC_WRITE))
+		memset(&ioctl_arg, 0, sizeof(ioctl_arg));
+
+	switch (cmd) {
+	case QTI_SMMU_PROXY_AC_LOCK_BUFFER:
+	{
+		struct smmu_proxy_acl_ctl *acl_ctl =
+			&ioctl_arg.acl_ctl;
+
+		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, true);
+		if (ret)
+			return ret;
+
+		break;
+	}
+	case QTI_SMMU_PROXY_AC_UNLOCK_BUFFER:
+	{
+		struct smmu_proxy_acl_ctl *acl_ctl =
+			&ioctl_arg.acl_ctl;
+
+		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, false);
+		if (ret)
+			return ret;
+
+		break;
+	}
+	case QTI_SMMU_PROXY_WIPE_BUFFERS:
+	{
+		struct smmu_proxy_wipe_buf_ctl *wipe_buf_ctl =
+			&ioctl_arg.wipe_buf_ctl;
+
+		ret = smmu_proxy_clear_all_buffers((void *) wipe_buf_ctl->context_bank_id_array,
+						   wipe_buf_ctl->num_cb_ids);
+		break;
+	}
+	case QTI_SMMU_PROXY_GET_DMA_BUF:
+	{
+		ret = smmu_proxy_get_dma_buf(&ioctl_arg.get_dma_buf_ctl);
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &ioctl_arg,
+				 _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static const struct file_operations smmu_proxy_dev_fops = {
+	.unlocked_ioctl = smmu_proxy_dev_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
+};
+
+static int receiver_probe_handler(struct device *dev)
+{
+	int ret = 0;
+
+	msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
+	if (IS_ERR(msgq_hdl)) {
+		ret = PTR_ERR(msgq_hdl);
+		dev_err(dev, "Queue registration failed: %d!\n", PTR_ERR(msgq_hdl));
+		return ret;
+	}
+
+	receiver_msgq_handler_thread = kthread_run(receiver_msgq_handler, msgq_hdl,
+						   "smmu_proxy_msgq_handler");
+	if (IS_ERR(receiver_msgq_handler_thread)) {
+		ret = PTR_ERR(receiver_msgq_handler_thread);
+		dev_err(dev, "Failed to launch receiver_msgq_handler thread: %d\n",
+			PTR_ERR(receiver_msgq_handler_thread));
+		goto free_msgq;
+	}
+
+	ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
+	if (ret) {
+		pr_err("Failed to create character device with error %d\n", ret);
+		goto free_kthread;
+	}
+
+	return 0;
+free_kthread:
+	kthread_stop(receiver_msgq_handler_thread);
+free_msgq:
+	gh_msgq_unregister(msgq_hdl);
+	return ret;
+}
+
+static int cb_probe_handler(struct device *dev)
+{
+	int ret;
+	unsigned int context_bank_id;
+
+	ret = of_property_read_u32(dev->of_node, "qti,cb-id", &context_bank_id);
+	if (ret) {
+		dev_err(dev, "Failed to read qti,cb-id property for device\n");
+		return -EINVAL;
+	}
+
+	if (context_bank_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
+		dev_err(dev, "Invalid CB ID: %u\n", context_bank_id);
+		return -EINVAL;
+	}
+
+	if (cb_devices[context_bank_id]) {
+		dev_err(dev, "Context bank %u is already populated\n", context_bank_id);
+		return -EINVAL;
+	}
+
+	ret = dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "Failed to set segment size\n");
+		return ret;
+	}
+
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_err(dev, "Failed to set DMA-MASK\n");
+		return ret;
+	}
+
+	cb_devices[context_bank_id] = dev;
+
+	return 0;
+}
+
+static int smmu_proxy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	if (of_device_is_compatible(dev->of_node, CB_COMPAT_STR)) {
+		return cb_probe_handler(dev);
+	} else if (of_device_is_compatible(dev->of_node, RECEIVER_COMPAT_STR)) {
+		return  receiver_probe_handler(dev);
+	} else {
+		return -EINVAL;
+	}
+}
+
+static const struct of_device_id smmu_proxy_match_table[] = {
+	{.compatible = RECEIVER_COMPAT_STR},
+	{.compatible = CB_COMPAT_STR},
+	{},
+};
+
+static struct platform_driver smmu_proxy_driver = {
+	.probe = smmu_proxy_probe,
+	.driver = {
+		.name = "qti-smmu-proxy",
+		.of_match_table = smmu_proxy_match_table,
+	},
+};
+
+int __init init_smmu_proxy_driver(void)
+{
+	int ret;
+	struct csf_version csf_version;
+
+	ret = smmu_proxy_get_csf_version(&csf_version);
+	if (ret) {
+		pr_err("%s: Unable to get CSF version\n", __func__);
+		return ret;
+	}
+
+	if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
+		pr_err("%s: CSF 2.5 not in use, not loading module\n", __func__);
+		return -EINVAL;
+	}
+
+	return platform_driver_register(&smmu_proxy_driver);
+}
+module_init(init_smmu_proxy_driver);
+
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL v2");

+ 58 - 0
smmu-proxy/uapi/linux/qti-smmu-proxy.h

@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __QTI_SMMU_PROXY_UAPI_H_
+#define __QTI_SMMU_PROXY_UAPI_H_
+
+#include <linux/types.h>
+
+#define QTI_SMMU_PROXY_CAMERA_CB 0
+#define QTI_SMMU_PROXY_DISPLAY_CB 1
+#define QTI_SMMU_PROXY_EVA_CB 2
+
+#define QTI_SMMU_PROXY_IOC_BASE 0x55
+
+struct csf_version {
+	__u32 arch_ver;
+	__u32 max_ver;
+	__u32 min_ver;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_GET_VERSION_IOCTL	_IOR(QTI_SMMU_PROXY_IOC_BASE, 0, \
+						     struct csf_version)
+
+struct smmu_proxy_acl_ctl {
+	__u32 dma_buf_fd;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_AC_LOCK_BUFFER	_IOW(QTI_SMMU_PROXY_IOC_BASE, 1, \
+					     struct smmu_proxy_acl_ctl)
+#define QTI_SMMU_PROXY_AC_UNLOCK_BUFFER	_IOW(QTI_SMMU_PROXY_IOC_BASE, 2, \
+					     struct smmu_proxy_acl_ctl)
+
+struct smmu_proxy_wipe_buf_ctl {
+	__u64 context_bank_id_array;
+	__u32 num_cb_ids;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_WIPE_BUFFERS	_IOW(QTI_SMMU_PROXY_IOC_BASE, 3, \
+					     struct smmu_proxy_wipe_buf_ctl)
+
+struct smmu_proxy_get_dma_buf_ctl {
+	/*
+	 * memparcel_hdl only needs to be 32-bit for Gunyah, but a 64-bit value
+	 * is needed to remain forward compatible with FF-A .
+	 */
+	__u64 memparcel_hdl;
+	__u32 dma_buf_fd;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_GET_DMA_BUF	_IOWR(QTI_SMMU_PROXY_IOC_BASE, 4, \
+					      struct smmu_proxy_get_dma_buf_ctl)
+
+#endif /* __QTI_SMMU_PROXY_UAPI_H_ */
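
A hypothetical user-space sketch (not part of this change) of the new UAPI: it assumes the character device created by smmu_proxy_create_dev() is exposed as /dev/qti-smmu-proxy and queries the CSF version through QTI_SMMU_PROXY_GET_VERSION_IOCTL.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qti-smmu-proxy.h>

int main(void)
{
	struct csf_version ver = { 0 };
	int fd = open("/dev/qti-smmu-proxy", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, QTI_SMMU_PROXY_GET_VERSION_IOCTL, &ver) == 0)
		printf("CSF %u.%u.%u\n", ver.arch_ver, ver.max_ver, ver.min_ver);
	close(fd);
	return 0;
}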

+ 30 - 1
tz_log/tz_log.c

@@ -20,6 +20,7 @@
 #include <soc/qcom/qseecomi.h>
 #include <linux/qtee_shmbridge.h>
 #include <linux/proc_fs.h>
+#include <linux/version.h>
 
 /* QSEE_LOG_BUF_SIZE = 32K */
 #define QSEE_LOG_BUF_SIZE 0x8000
@@ -1304,7 +1305,13 @@ static ssize_t tzdbg_fs_read(struct file *file, char __user *buf,
 
 static int tzdbg_procfs_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, NULL, pde_data(inode));
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
+	return single_open(file, NULL, PDE_DATA(inode));
+#else
+	return single_open(file, NULL, pde_data(inode));
+#endif
+
 }
 
 static int tzdbg_procfs_release(struct inode *inode, struct file *file)
@@ -1460,6 +1467,20 @@ static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
 			enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
 }
 
+static bool is_hyp_dir(int tzdbg_stat_type)
+{
+	switch(tzdbg_stat_type)
+	{
+		case TZDBG_HYP_GENERAL:
+		case TZDBG_HYP_LOG:
+		case TZDBG_RM_LOG:
+			return true;
+		default:
+			return false;
+	}
+	return false;
+}
+
 static int  tzdbg_fs_init(struct platform_device *pdev)
 {
 	int rc = 0;
@@ -1475,6 +1496,14 @@ static int  tzdbg_fs_init(struct platform_device *pdev)
 
 	for (i = 0; i < TZDBG_STATS_MAX; i++) {
 		tzdbg.debug_tz[i] = i;
+		/*
+		 * If hypervisor is disabled, do not create
+		 * hyp_general, hyp_log and rm_log directories,
+		 * as accessing them would give segmentation fault
+		 */
+		if ((!tzdbg.is_hyplog_enabled) && (is_hyp_dir(i))) {
+			continue;
+		}
 		dent = proc_create_data(tzdbg.stat[i].name,
 				0444, dent_dir,
 				&tzdbg_fops, &tzdbg.debug_tz[i]);