
Add 'qcom/opensource/mm-drivers/' from commit 'f37d625cda5ef9f033b3d34d9c1a64c52ebc51c2'

git-subtree-dir: qcom/opensource/mm-drivers
git-subtree-mainline: 880d4057199666efd4ca32c8a5696cea9387b129
git-subtree-split: f37d625cda5ef9f033b3d34d9c1a64c52ebc51c2
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/mm-drivers
tag: DISPLAY.LA.4.0.r2-07600-lanai.0
David Wronek committed 5 months ago
commit f9b254670f
45 files changed, 9571 insertions(+), 0 deletions(-)
  1. 36 0
      qcom/opensource/mm-drivers/Android.bp
  2. 16 0
      qcom/opensource/mm-drivers/Android.mk
  3. 22 0
      qcom/opensource/mm-drivers/BUILD.bazel
  4. 7 0
      qcom/opensource/mm-drivers/config/kalamammdrivers.conf
  5. 9 0
      qcom/opensource/mm-drivers/config/kalamammdriversconf.h
  6. 42 0
      qcom/opensource/mm-drivers/hw_fence/Android.mk
  7. 16 0
      qcom/opensource/mm-drivers/hw_fence/BUILD.bazel
  8. 28 0
      qcom/opensource/mm-drivers/hw_fence/Kbuild
  9. 4 0
      qcom/opensource/mm-drivers/hw_fence/Kconfig
  10. 14 0
      qcom/opensource/mm-drivers/hw_fence/Makefile
  11. 1 0
      qcom/opensource/mm-drivers/hw_fence/defconfig
  12. 46 0
      qcom/opensource/mm-drivers/hw_fence/define_hw_fence.bzl
  13. 209 0
      qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_debug.h
  14. 135 0
      qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_ipc.h
  15. 533 0
      qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_priv.h
  16. 167 0
      qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_utils.h
  17. 222 0
      qcom/opensource/mm-drivers/hw_fence/include/msm_hw_fence_synx_translation.h
  18. 1259 0
      qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_debug.c
  19. 428 0
      qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_ipc.c
  20. 1766 0
      qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_priv.c
  21. 1104 0
      qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_utils.c
  22. 629 0
      qcom/opensource/mm-drivers/hw_fence/src/hw_fence_ioctl.c
  23. 807 0
      qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence.c
  24. 335 0
      qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence_synx_translation.c
  25. 26 0
      qcom/opensource/mm-drivers/mm_driver_board.mk
  26. 17 0
      qcom/opensource/mm-drivers/mm_driver_product.mk
  27. 95 0
      qcom/opensource/mm-drivers/mm_drivers_kernel_headers.py
  28. 43 0
      qcom/opensource/mm-drivers/msm_ext_display/Android.mk
  29. 10 0
      qcom/opensource/mm-drivers/msm_ext_display/BUILD.bazel
  30. 12 0
      qcom/opensource/mm-drivers/msm_ext_display/Kbuild
  31. 4 0
      qcom/opensource/mm-drivers/msm_ext_display/Kconfig
  32. 15 0
      qcom/opensource/mm-drivers/msm_ext_display/Makefile
  33. 1 0
      qcom/opensource/mm-drivers/msm_ext_display/defconfig
  34. 31 0
      qcom/opensource/mm-drivers/msm_ext_display/define_msm_ext_display.bzl
  35. 702 0
      qcom/opensource/mm-drivers/msm_ext_display/src/msm_ext_display.c
  36. 42 0
      qcom/opensource/mm-drivers/sync_fence/Android.mk
  37. 16 0
      qcom/opensource/mm-drivers/sync_fence/BUILD.bazel
  38. 16 0
      qcom/opensource/mm-drivers/sync_fence/Kbuild
  39. 4 0
      qcom/opensource/mm-drivers/sync_fence/Kconfig
  40. 15 0
      qcom/opensource/mm-drivers/sync_fence/Makefile
  41. 1 0
      qcom/opensource/mm-drivers/sync_fence/defconfig
  42. 33 0
      qcom/opensource/mm-drivers/sync_fence/define_sync_fence.bzl
  43. 6 0
      qcom/opensource/mm-drivers/sync_fence/include/uapi/Kbuild
  44. 63 0
      qcom/opensource/mm-drivers/sync_fence/include/uapi/sync_fence/qcom_sync_file.h
  45. 584 0
      qcom/opensource/mm-drivers/sync_fence/src/qcom_sync_file.c

+ 36 - 0
qcom/opensource/mm-drivers/Android.bp

@@ -0,0 +1,36 @@
+headers_src = [
+    "sync_fence/include/uapi/*/**/*.h",
+]
+
+mm_drivers_headers_out = [
+    "sync_fence/qcom_sync_file.h",
+]
+
+mm_drivers_kernel_headers_verbose = "--verbose "
+genrule {
+    name: "qti_generate_mm_drivers_kernel_headers",
+    tools: [
+        "headers_install.sh",
+        "unifdef"
+    ],
+    tool_files: [
+        "mm_drivers_kernel_headers.py",
+    ],
+    srcs: headers_src,
+    cmd: "python3 $(location mm_drivers_kernel_headers.py) " +
+          mm_drivers_kernel_headers_verbose +
+         "--header_arch arm64 " +
+         "--gen_dir $(genDir) " +
+         "--mm_drivers_include_uapi $(locations sync_fence/include/uapi/*/**/*.h) " +
+         "--unifdef $(location unifdef) " +
+         "--headers_install $(location headers_install.sh)",
+    out: mm_drivers_headers_out,
+}
+
+cc_library_headers {
+    name: "qti_mm_drivers_kernel_headers",
+    generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
+    export_generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
+    vendor: true,
+    recovery_available: true
+}

+ 16 - 0
qcom/opensource/mm-drivers/Android.mk

@@ -0,0 +1,16 @@
+MM_DRIVER_PATH := $(call my-dir)
+
+MM_DRV_DLKM_ENABLE := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+	ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
+		MM_DRV_DLKM_ENABLE := false
+	endif
+endif
+
+ifeq ($(MM_DRV_DLKM_ENABLE), true)
+	include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk
+	ifneq ($(TARGET_BOARD_PLATFORM), taro)
+		include $(MM_DRIVER_PATH)/hw_fence/Android.mk
+		include $(MM_DRIVER_PATH)/sync_fence/Android.mk
+	endif
+endif

+ 22 - 0
qcom/opensource/mm-drivers/BUILD.bazel

@@ -0,0 +1,22 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+ddk_headers(
+    name = "mm_drivers_configs",
+    hdrs = glob(["config/*.h"]),
+    includes = ["config"],
+)
+
+ddk_headers(
+    name = "mm_drivers_headers",
+    hdrs = [
+        ":mm_drivers_configs",
+        "//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers",
+        "//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers",
+    ],
+)

+ 7 - 0
qcom/opensource/mm-drivers/config/kalamammdrivers.conf

@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+# Copyright (c) 2020, The Linux Foundation. All rights reserved.
+
+export CONFIG_MSM_EXT_DISPLAY=y
+export CONFIG_QCOM_SPEC_SYNC=y
+export CONFIG_QTI_HW_FENCE=y

+ 9 - 0
qcom/opensource/mm-drivers/config/kalamammdriversconf.h

@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_MSM_EXT_DISPLAY 1
+#define CONFIG_QCOM_SPEC_SYNC 1
+#define CONFIG_QTI_HW_FENCE 1
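
This conf/header pair mirrors a defconfig for out-of-tree builds: the .conf file exports the CONFIG_* switches to make, while the header is force-included into every compilation unit (see the "LINUXINCLUDE += -include ..." lines in the Kbuild files below) so C code sees the same macros. A minimal sketch, not part of this commit, of how driver code can gate on these switches; the function name is illustrative:

#include <linux/kconfig.h>
#include <linux/errno.h>

/* Because kalamammdriversconf.h defines CONFIG_QTI_HW_FENCE as 1, both plain
 * preprocessor checks and the kernel's IS_ENABLED() helper treat the feature
 * as built-in. hypothetical_check_hw_fence() is an illustrative name. */
static int hypothetical_check_hw_fence(void)
{
#if IS_ENABLED(CONFIG_QTI_HW_FENCE)
	return 0;		/* hw-fence support compiled in */
#else
	return -ENODEV;		/* switch absent from the conf header */
#endif
}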

+ 42 - 0
qcom/opensource/mm-drivers/hw_fence/Android.mk

@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+LOCAL_MODULE_DDK_BUILD := true
+include $(CLEAR_VARS)
+
+# This makefile is only for DLKM
+ifneq ($(findstring vendor,$(LOCAL_PATH)),)
+
+ifneq ($(findstring opensource,$(LOCAL_PATH)),)
+	MSM_HW_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/hw_fence
+endif # opensource
+
+DLKM_DIR := $(TOP)/device/qcom/common/dlkm
+
+LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+
+###########################################################
+# This is set once per LOCAL_PATH, not per (kernel) module
+KBUILD_OPTIONS := MSM_HW_FENCE_ROOT=$(MSM_HW_FENCE_BLD_DIR)
+KBUILD_OPTIONS += MODNAME=msm_hw_fence
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+
+###########################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := hw-fence-module-symvers
+LOCAL_MODULE_STEM         := Module.symvers
+LOCAL_MODULE_KBUILD_NAME  := Module.symvers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###########################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := msm_hw_fence.ko
+LOCAL_MODULE_KBUILD_NAME  := msm_hw_fence.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###########################################################
+endif # DLKM check

+ 16 - 0
qcom/opensource/mm-drivers/hw_fence/BUILD.bazel

@@ -0,0 +1,16 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+load(":define_hw_fence.bzl", "define_hw_fence")
+
+package(
+    default_visibility = [
+      "//visibility:public"
+    ],
+)
+
+ddk_headers(
+    name = "hw_fence_headers",
+    hdrs = glob(["include/*.h"]),
+    includes = ["include"]
+)
+
+define_hw_fence()

+ 28 - 0
qcom/opensource/mm-drivers/hw_fence/Kbuild

@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+KDIR := $(TOP)/kernel_platform/msm-kernel
+include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf
+LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \
+		-I$(MSM_HW_FENCE_ROOT)hw_fence/include/
+
+ifdef CONFIG_QTI_HW_FENCE
+obj-m += msm_hw_fence.o
+
+msm_hw_fence-y := src/msm_hw_fence.o \
+		src/hw_fence_drv_priv.o \
+		src/hw_fence_drv_utils.o \
+		src/hw_fence_drv_debug.o \
+		src/hw_fence_drv_ipc.o
+
+ifneq ($(CONFIG_ARCH_KALAMA), y)
+LINUXINCLUDE += -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \
+		-I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/
+msm_hw_fence-y += src/msm_hw_fence_synx_translation.o
+endif
+
+msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o
+
+CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
+endif
+EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
+		-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

+ 4 - 0
qcom/opensource/mm-drivers/hw_fence/Kconfig

@@ -0,0 +1,4 @@
+config QTI_HW_FENCE
+	bool "HW Fence"
+	help
+	  Enable the hw_fence module

+ 14 - 0
qcom/opensource/mm-drivers/hw_fence/Makefile

@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+KBUILD_OPTIONS += MSM_HW_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../
+
+all: modules
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
+
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 1 - 0
qcom/opensource/mm-drivers/hw_fence/defconfig

@@ -0,0 +1 @@
+CONFIG_QTI_HW_FENCE=y

+ 46 - 0
qcom/opensource/mm-drivers/hw_fence/define_hw_fence.bzl

@@ -0,0 +1,46 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule")
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+load("//msm-kernel:target_variants.bzl", "get_all_variants")
+
+def _define_module(target, variant):
+    tv = "{}_{}".format(target, variant)
+    ddk_module(
+        name = "{}_msm_hw_fence".format(tv),
+        srcs = [
+            "src/hw_fence_drv_debug.c",
+            "src/hw_fence_drv_ipc.c",
+            "src/hw_fence_drv_priv.c",
+            "src/hw_fence_drv_utils.c",
+            "src/msm_hw_fence.c",
+            "src/msm_hw_fence_synx_translation.c",
+        ],
+        out = "msm_hw_fence.ko",
+        defconfig = "defconfig",
+        kconfig = "Kconfig",
+        conditional_srcs = {
+            "CONFIG_DEBUG_FS": {
+                True: ["src/hw_fence_ioctl.c"],
+            },
+        },
+        deps = [
+            "//msm-kernel:all_headers",
+            "//vendor/qcom/opensource/synx-kernel:synx_headers",
+            "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers",
+        ],
+        kernel_build = "//msm-kernel:{}".format(tv),
+    )
+
+    copy_to_dist_dir(
+        name = "{}_msm_hw_fence_dist".format(tv),
+        data = [":{}_msm_hw_fence".format(tv)],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+        log = "info",
+    )
+
+def define_hw_fence():
+    for (t, v) in get_all_variants():
+        _define_module(t, v)

+ 209 - 0
qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_debug.h

@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __HW_FENCE_DRV_DEBUG
+#define __HW_FENCE_DRV_DEBUG
+
+#include "hw_fence_drv_ipc.h"
+
+#define HW_FENCE_NAME_SIZE 64
+
+enum hw_fence_drv_prio {
+	HW_FENCE_HIGH = 0x000001,	/* High density debug messages (noisy) */
+	HW_FENCE_LOW = 0x000002,	/* Low density debug messages */
+	HW_FENCE_INFO = 0x000004,	/* Informational prints */
+	HW_FENCE_INIT = 0x000008,	/* Initialization logs */
+	HW_FENCE_QUEUE = 0x000010,	/* Queue logs */
+	HW_FENCE_LUT = 0x000020,	/* Look-up and algorithm logs */
+	HW_FENCE_IRQ = 0x000040,	/* Interrupt-related messages */
+	HW_FENCE_LOCK = 0x000080,	/* Lock-related messages */
+	HW_FENCE_PRINTK = 0x010000,
+};
+
+extern u32 msm_hw_fence_debug_level;
+
+#define dprintk(__level, __fmt, ...) \
+	do { \
+		if (msm_hw_fence_debug_level & __level) \
+			if (msm_hw_fence_debug_level & HW_FENCE_PRINTK) \
+				pr_err(__fmt, ##__VA_ARGS__); \
+	} while (0)
+
+
+#define HWFNC_ERR(fmt, ...) \
+	pr_err("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \
+	__builtin_return_address(0), ##__VA_ARGS__)
+
+#define HWFNC_DBG_H(fmt, ...) \
+	dprintk(HW_FENCE_HIGH, "[hwfence:%s:%d][dbgh]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_L(fmt, ...) \
+	dprintk(HW_FENCE_LOW, "[hwfence:%s:%d][dbgl]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_INFO(fmt, ...) \
+	dprintk(HW_FENCE_INFO, "[hwfence:%s:%d][dbgi]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_INIT(fmt, ...) \
+	dprintk(HW_FENCE_INIT, "[hwfence:%s:%d][dbg]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_Q(fmt, ...) \
+	dprintk(HW_FENCE_QUEUE, "[hwfence:%s:%d][dbgq]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_LUT(fmt, ...) \
+	dprintk(HW_FENCE_LUT, "[hwfence:%s:%d][dbglut]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_IRQ(fmt, ...) \
+	dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_LOCK(fmt, ...) \
+	dprintk(HW_FENCE_LOCK, "[hwfence:%s:%d][dbglock]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_DBG_DUMP(prio, fmt, ...) \
+	dprintk(prio, "[hwfence:%s:%d][dbgd]"fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define HWFNC_WARN(fmt, ...) \
+	pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \
+	__builtin_return_address(0), ##__VA_ARGS__)
+
+int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id);
+
+void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio,
+	struct msm_hw_fence_client *hw_fence_client);
+void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash,
+	u32 count);
+void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data);
+void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data);
+
+extern const struct file_operations hw_sync_debugfs_fops;
+
+struct hw_fence_out_clients_map {
+	int ipc_client_id_vid; /* ipc client virtual id for the hw fence client */
+	int ipc_client_id_pid; /* ipc client physical id for the hw fence client */
+	int ipc_signal_id; /* ipc signal id for the hw fence client */
+};
+
+/* These signals are the ones that the actual clients should be triggering; the hw-fence driver
+ * does not need any knowledge of these signals. They are listed here for debugging purposes only.
+ * Only the fence controller and the clients know these IDs, since they are used to trigger the
+ * ipcc from the 'client hw-core' to the 'hw-fence controller'.
+ * The index of this struct must match the enum hw_fence_client_id.
+ */
+static const struct hw_fence_out_clients_map
+			dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_ID_VAL6 + 1] = {
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0},  /* CTRL_LOOPBACK */
+	{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0},  /* CTX0 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2},  /* CTL0 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 4},  /* CTL1 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 6},  /* CTL2 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 8},  /* CTL3 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 10}, /* CTL4 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 12}, /* CTL5 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21}, /* VAL0 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22}, /* VAL1 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23}, /* VAL2 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24}, /* VAL3 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25}, /* VAL4 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26}, /* VAL5 */
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27}, /* VAL6 */
+};
+
+/**
+ * struct hw_dma_fence - fences created by hw-fence for debugging.
+ * @base: base dma-fence structure, this must remain at beginning of the struct.
+ * @name: name of each fence.
+ * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence
+ *                 driver after a successful registration of the client and used by this fence
+ *                 during release.
+ */
+struct hw_dma_fence {
+	struct dma_fence base;
+	char name[HW_FENCE_NAME_SIZE];
+	void *client_handle;
+};
+
+static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence)
+{
+	return container_of(fence, struct hw_dma_fence, base);
+}
+
+static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock)
+{
+	struct hw_dma_fence *dma_fence;
+	int fence_idx;
+
+	for (fence_idx = i; fence_idx >= 0 ; fence_idx--) {
+		kfree(fences_lock[fence_idx]);
+
+		dma_fence = to_hw_dma_fence(fences[fence_idx]);
+		kfree(dma_fence);
+	}
+
+	kfree(fences_lock);
+	kfree(fences);
+}
+
+static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence)
+{
+	struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
+
+	return hw_dma_fence->name;
+}
+
+static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence)
+{
+	struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
+
+	return hw_dma_fence->name;
+}
+
+static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence)
+{
+	if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) {
+		HWFNC_ERR("invalid hwfence data, won't release hw_fence!\n");
+		return;
+	}
+
+	/* release hw-fence */
+	if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base))
+		HWFNC_ERR("failed to release hw_fence!\n");
+}
+
+static void hw_fence_dbg_release(struct dma_fence *fence)
+{
+	struct hw_dma_fence *hw_dma_fence;
+
+	if (!fence)
+		return;
+
+	HWFNC_DBG_H("release backing fence %pK\n", fence);
+	hw_dma_fence = to_hw_dma_fence(fence);
+
+	if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
+		_hw_fence_release(hw_dma_fence);
+
+	kfree(fence->lock);
+	kfree(hw_dma_fence);
+}
+
+static struct dma_fence_ops hw_fence_dbg_ops = {
+	.get_driver_name = hw_fence_dbg_get_driver_name,
+	.get_timeline_name = hw_fence_dbg_get_timeline_name,
+	.enable_signaling = hw_fence_dbg_enable_signaling,
+	.wait = dma_fence_default_wait,
+	.release = hw_fence_dbg_release,
+};
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __HW_FENCE_DRV_DEBUG */
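
Note how 'base' must stay the first member of struct hw_dma_fence so to_hw_dma_fence() can recover the wrapper with container_of(). Below is a hedged sketch, not part of this commit, of how a debugfs path could allocate and initialize such a fence against hw_fence_dbg_ops; the helper name and allocation flags are assumptions:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-fence.h>
#include "hw_fence_drv_debug.h"

/* Sketch only: back a dma_fence with the debug ops defined above. */
static struct dma_fence *hypothetical_create_dbg_fence(void *client_handle,
		u64 context, u64 seqno)
{
	struct hw_dma_fence *f;
	spinlock_t *lock;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!lock) {
		kfree(f);
		return NULL;
	}
	spin_lock_init(lock);

	snprintf(f->name, HW_FENCE_NAME_SIZE, "hwfence_dbg:%llu:%llu", context, seqno);
	f->client_handle = client_handle;

	/* 'base' first: to_hw_dma_fence() depends on this layout */
	dma_fence_init(&f->base, &hw_fence_dbg_ops, lock, context, seqno);

	return &f->base;
}

The lock is allocated separately because hw_fence_dbg_release() frees fence->lock and the wrapper independently.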

+ 135 - 0
qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_ipc.h

@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __HW_FENCE_DRV_IPC_H
+#define __HW_FENCE_DRV_IPC_H
+
+/* ipc clients virtual client-id */
+#define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8
+#define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9
+#define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11
+#define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12
+#define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25
+#define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128
+#define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129
+#define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130
+#define HW_FENCE_IPC_CLIENT_ID_IFE3_VID 131
+#define HW_FENCE_IPC_CLIENT_ID_IFE4_VID 132
+#define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133
+#define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134
+#define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135
+
+/* ipc clients physical client-id */
+#define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3
+#define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4
+#define HW_FENCE_IPC_CLIENT_ID_IPE_PID 5
+#define HW_FENCE_IPC_CLIENT_ID_VPU_PID 8
+#define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9
+#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID 11
+#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID 12
+#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID 13
+#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID 14
+#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID 15
+#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16
+#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17
+#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18
+
+#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2
+#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2
+#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4
+
+#define HW_FENCE_IPCC_HW_REV_170 0x00010700  /* Kalama */
+#define HW_FENCE_IPCC_HW_REV_203 0x00020003  /* Pineapple */
+
+#define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) ((base) + (0x40000*(p)) + (0x1000*(c)))
+#define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) ((base) + 0x8 + (0x40000*(p)) + (0x1000*(c)))
+#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \
+	((base) + 0x14 + (0x40000*(p)) + (0x1000*(c)))
+#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base) + 0xc + (0x40000*(p)) + (0x1000*(c)))
+
+/**
+ * hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair.
+ * @drv_data: driver data.
+ * @tx_client_id: ipc client id that sends the ipc signal.
+ * @rx_client_id: ipc client id that receives the ipc signal.
+ * @signal_id: signal id to send.
+ *
+ * This API triggers the ipc 'signal_id' from the 'tx_client_id' to the 'rx_client_id'
+ */
+void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
+	u32 tx_client_id, u32 rx_client_id, u32 signal_id);
+
+/**
+ * hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for hw-fence driver.
+ * @drv_data: driver data.
+ *
+ * Return: 0 on success or negative errno (-EINVAL)
+ */
+int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data);
+
+/**
+ * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client.
+ * @drv_data: driver data.
+ *
+ * Return: 0 on success or negative errno (-EINVAL)
+ */
+int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data);
+
+/**
+ * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the
+ *		hw fence driver client.
+ * @drv_data: driver data.
+ * @client_id: hw fence driver client id.
+ *
+ * The ipc client id returned by this API is used by the hw fence driver when signaling the fence.
+ *
+ * Return: client_id on success or negative errno (-EINVAL)
+ */
+int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id);
+
+/**
+ * hw_fence_ipcc_get_client_phys_id() - Returns the ipc client physical id that corresponds to the
+ *		hw fence driver client.
+ * @drv_data: driver data.
+ * @client_id: hw fence driver client id.
+ *
+ * The ipc client id returned by this API is used by the hw fence driver when signaling the fence.
+ *
+ * Return: client_id on success or negative errno (-EINVAL)
+ */
+int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id);
+
+/**
+ * hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence
+ *		driver client.
+ * @drv_data: driver data.
+ * @client_id: hw fence driver client id.
+ *
+ * The ipc signal id returned by this API is used by the hw fence driver when signaling the fence.
+ *
+ * Return: signal_id on success or negative errno (-EINVAL)
+ */
+int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id);
+
+/**
+ * hw_fence_ipcc_needs_rxq_update() - Returns bool to indicate if client uses rx-queue.
+ * @drv_data: driver data.
+ * @client_id: hw fence driver client id.
+ *
+ * Return: true if client needs to update rxq, false otherwise
+ */
+bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id);
+
+/**
+ * hw_fence_ipcc_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt for
+ *		already signaled fences
+ * @drv_data: driver data.
+ * @client_id: hw fence driver client id.
+ *
+ * Return: true if client needs ipc interrupt for signaled fences, false otherwise
+ */
+bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id);
+
+#endif /* __HW_FENCE_DRV_IPC_H */
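
The IPC_PROTOCOLp_CLIENTc_* macros encode the IPCC register layout: each protocol occupies a 0x40000 stride, each client a 0x1000 stride within a protocol, and the individual registers sit at fixed offsets (0x0 version, 0x8 config, 0xc send, 0x14 recv-signal-enable). A hedged sketch, not part of this commit, resolving the SEND register for the APPS physical client on the Pineapple fence protocol; only the macros and IDs come from this header:

#include <linux/io.h>
#include "hw_fence_drv_priv.h"	/* struct hw_fence_driver_data */
#include "hw_fence_drv_ipc.h"

/* Sketch only: offset arithmetic relative to the mapped ipcc region. */
static void __iomem *hypothetical_apps_send_reg(struct hw_fence_driver_data *drv_data)
{
	u32 off = IPC_PROTOCOLp_CLIENTc_SEND(0,
			HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE,	/* p = 4 */
			HW_FENCE_IPC_CLIENT_ID_APPS_PID);		/* c = 3 */

	/* off = 0xc + 0x40000*4 + 0x1000*3 = 0x10300c */
	return drv_data->ipcc_io_mem + off;
}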

+ 533 - 0
qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_priv.h

@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __HW_FENCE_DRV_INTERNAL_H
+#define __HW_FENCE_DRV_INTERNAL_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/soc/qcom/msm_hw_fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/slab.h>
+
+/* max u64 to indicate invalid fence */
+#define HW_FENCE_INVALID_PARENT_FENCE (~0ULL)
+
+/* hash algorithm constants */
+#define HW_FENCE_HASH_A_MULT	4969 /* a multiplier for Hash algorithm */
+#define HW_FENCE_HASH_C_MULT	907  /* c multiplier for Hash algorithm */
+
+/* number of queues per type (i.e. ctrl or client queues) */
+#define HW_FENCE_CTRL_QUEUES	2 /* Rx and Tx Queues */
+#define HW_FENCE_CLIENT_QUEUES	2 /* Rx and Tx Queues */
+
+/* hfi headers calculation */
+#define HW_FENCE_HFI_TABLE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_table_header))
+#define HW_FENCE_HFI_QUEUE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_header))
+
+#define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
+			(HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES))
+
+#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
+			(HW_FENCE_HFI_QUEUE_HEADER_SIZE * queues_num))
+
+/*
+ * Max payload size is the biggest message size that we can have in the CTRL queue.
+ * In this case the max message is calculated as follows, using 32-bit elements:
+ * 1 header + 1 msg-type + 1 client_id + 2 hash + 1 error
+ */
+#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE ((1 + 1 + 1 + 2 + 1) * sizeof(u32))
+
+#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE
+#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload))
+
+/* Locks area for all clients with RxQ */
+#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (sizeof(u64) * rxq_clients_num)
+
+#define HW_FENCE_TX_QUEUE 1
+#define HW_FENCE_RX_QUEUE 2
+
+/* Client ID for the internal join fence; used by the framework when creating a join-fence */
+#define HW_FENCE_JOIN_FENCE_CLIENT_ID (~(u32)0)
+
+/**
+ * msm hw fence flags:
+ * MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled
+ */
+#define MSM_HW_FENCE_FLAG_SIGNAL	BIT(0)
+
+/**
+ * MSM_HW_FENCE_MAX_JOIN_PARENTS:
+ * Maximum number of parents that a fence can have for a join-fence
+ */
+#define MSM_HW_FENCE_MAX_JOIN_PARENTS	3
+
+/**
+ * HW_FENCE_PAYLOAD_REV:
+ * Payload version with major and minor version information
+ */
+#define HW_FENCE_PAYLOAD_REV(major, minor) (((major) << 8) | ((minor) & 0xFF))
+
+/**
+ * HW_FENCE_EVENT_MAX_DATA:
+ * Maximum data that can be added to the debug event
+ */
+#define HW_FENCE_EVENT_MAX_DATA 12
+
+enum hw_fence_lookup_ops {
+	HW_FENCE_LOOKUP_OP_CREATE = 0x1,
+	HW_FENCE_LOOKUP_OP_DESTROY,
+	HW_FENCE_LOOKUP_OP_CREATE_JOIN,
+	HW_FENCE_LOOKUP_OP_FIND_FENCE
+};
+
+/**
+ * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional
+ *                                parameter passed from the waiting client and returned
+ *                                to it upon fence signaling. Only the first HW Fence
+ *                                Client for non-VAL clients (e.g. GFX, IPE, VPU) have
+ *                                client_data.
+ * @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client 0.
+ * @HW_FENCE_CLIENT_DATA_ID_IPE: IPE Client 0.
+ * @HW_FENCE_CLIENT_DATA_ID_VPU: VPU Client 0.
+ * @HW_FENCE_CLIENT_DATA_ID_VAL0: Debug validation client 0.
+ * @HW_FENCE_CLIENT_DATA_ID_VAL1: Debug validation client 1.
+ * @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an
+ *                                  invalid hw_fence_client_data_id
+ */
+enum hw_fence_client_data_id {
+	HW_FENCE_CLIENT_DATA_ID_CTX0,
+	HW_FENCE_CLIENT_DATA_ID_IPE,
+	HW_FENCE_CLIENT_DATA_ID_VPU,
+	HW_FENCE_CLIENT_DATA_ID_VAL0,
+	HW_FENCE_CLIENT_DATA_ID_VAL1,
+	HW_FENCE_MAX_CLIENTS_WITH_DATA,
+};
+
+/**
+ * struct msm_hw_fence_queue - Structure holding the data of the hw fence queues.
+ * @va_queue: pointer to the virtual address of the queue elements
+ * @q_size_bytes: size of the queue
+ * @va_header: pointer to the hfi header virtual address
+ * @pa_queue: physical address of the queue
+ * @rd_wr_idx_start: start read and write indexes for client queue (zero by default)
+ * @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default)
+ * @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and
+ *               hfi_header->tx_wm is updated instead
+ */
+struct msm_hw_fence_queue {
+	void *va_queue;
+	u32 q_size_bytes;
+	void *va_header;
+	phys_addr_t pa_queue;
+	u32 rd_wr_idx_start;
+	u32 rd_wr_idx_factor;
+	bool skip_wr_idx;
+};
+
+/**
+ * enum payload_type - Enum with the queue payload types.
+ * HW_FENCE_PAYLOAD_TYPE_1: client queue payload
+ * HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id
+ */
+enum payload_type {
+	HW_FENCE_PAYLOAD_TYPE_1 = 1,
+	HW_FENCE_PAYLOAD_TYPE_2
+};
+
+/**
+ * struct msm_hw_fence_client - Structure holding the per-Client allocated resources.
+ * @client_id: internal client_id used within HW fence driver; index into the clients struct
+ * @client_id_ext: external client_id, equal to client_id except for clients with configurable
+ *                 number of sub-clients (e.g. ife clients)
+ * @mem_descriptor: hfi header memory descriptor
+ * @queues: queues descriptor
+ * @queues_num: number of client queues
+ * @fence_error_cb: function called for waiting clients that need HLOS notification of fence error
+ * @fence_error_cb_userdata: opaque pointer registered with fence error callback and passed to
+ *                           client during invocation of callback function
+ * @error_cb_lock: lock to synchronize access to fence error cb and fence error cb data
+ * @ipc_signal_id: id of the signal to be triggered for this client
+ * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client
+ * @ipc_client_pid: physical id of the ipc client for this hw fence driver client
+ * @update_rxq: bool to indicate if client uses rx-queue
+ * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
+ * @wait_queue: wait queue for the validation clients
+ * @val_signal: doorbell flag to signal the validation clients in the wait queue
+ */
+struct msm_hw_fence_client {
+	enum hw_fence_client_id client_id;
+	enum hw_fence_client_id client_id_ext;
+	struct msm_hw_fence_mem_addr mem_descriptor;
+	struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES];
+	int queues_num;
+	msm_hw_fence_error_cb_t fence_error_cb;
+	void *fence_error_cb_userdata;
+	struct mutex error_cb_lock;
+	int ipc_signal_id;
+	int ipc_client_vid;
+	int ipc_client_pid;
+	bool update_rxq;
+	bool send_ipc;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	wait_queue_head_t wait_queue;
+	atomic_t val_signal;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+/**
+ * struct msm_hw_fence_mem_data - Structure holding internal memory attributes
+ *
+ * @attrs: attributes for the memory allocation
+ */
+struct msm_hw_fence_mem_data {
+	unsigned long attrs;
+};
+
+/**
+ * struct msm_hw_fence_dbg_data - Structure holding debugfs data
+ *
+ * @root: debugfs root
+ * @entry_rd: flag to indicate if debugfs dumps a single line or table
+ * @context_rd: debugfs setting to indicate which context id to dump
+ * @seqno_rd: debugfs setting to indicate which seqno to dump
+ * @hw_fence_sim_release_delay: delay in microseconds for the debugfs node that simulates the
+ *                              hw-fences behavior, to release the hw-fences
+ * @create_hw_fences: boolean to continuously create hw-fences within debugfs
+ * @clients_list: list of debug clients registered
+ * @clients_list_lock: lock to synchronize access to the clients list
+ * @lock_wake_cnt: number of times that driver triggers wake-up ipcc to unlock inter-vm try-lock
+ */
+struct msm_hw_fence_dbg_data {
+	struct dentry *root;
+
+	bool entry_rd;
+	u64 context_rd;
+	u64 seqno_rd;
+
+	u32 hw_fence_sim_release_delay;
+	bool create_hw_fences;
+
+	struct list_head clients_list;
+	struct mutex clients_list_lock;
+
+	u64 lock_wake_cnt;
+};
+
+/**
+ * struct hw_fence_client_type_desc - Structure holding client type properties, including static
+ *                                    properties and client queue properties read from device-tree.
+ *
+ * @name: name of client type, used to parse properties from device-tree
+ * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
+ *           HW_FENCE_CLIENT_ID_CTL0 for DPU clients
+ * @max_clients_num: maximum number of clients of given client type
+ * @clients_num: number of clients of given client type
+ * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
+ *              two (for both Tx and Rx Queues)
+ * @queue_entries: number of entries per client queue of given client type
+ * @start_padding: size of padding between queue table header and first queue header in bytes
+ * @end_padding: size of padding between queue header(s) and first queue payload in bytes
+ * @mem_size: size of memory allocated for client queue(s) per client in bytes
+ * @txq_idx_start: start read and write indexes for client tx queue (zero by default)
+ * @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default)
+ * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
+ *                   driver and hfi_header->tx_wm is updated instead
+ */
+struct hw_fence_client_type_desc {
+	char *name;
+	enum hw_fence_client_id init_id;
+	u32 max_clients_num;
+	u32 clients_num;
+	u32 queues_num;
+	u32 queue_entries;
+	u32 start_padding;
+	u32 end_padding;
+	u32 mem_size;
+	u32 txq_idx_start;
+	u32 txq_idx_factor;
+	bool skip_txq_wr_idx;
+};
+
+/**
+ * struct hw_fence_client_queue_desc - Structure holding client queue properties for a client.
+ *
+ * @type: pointer to client queue properties of client type
+ * @start_offset: start offset of client queue memory region, from beginning of carved-out memory
+ *                allocation for hw fence driver
+ */
+struct hw_fence_client_queue_desc {
+	struct hw_fence_client_type_desc *type;
+	u32 start_offset;
+};
+
+/**
+ * struct hw_fence_driver_data - Structure holding internal hw-fence driver data
+ *
+ * @dev: device driver pointer
+ * @resources_ready: value set by driver at end of probe, once all resources are ready
+ * @hw_fence_table_entries: total number of hw-fences in the global table
+ * @hw_fence_mem_fences_table_size: hw-fences global table total size
+ * @hw_fence_queue_entries: total number of entries that can be available in the queue
+ * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload
+ * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq
+ * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client
+ * @hw_fence_client_types: descriptors of properties for each hw fence client type
+ * @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree)
+ * @clients_num: number of supported hw fence clients (configured based on device-tree)
+ * @hw_fences_tbl: pointer to the hw-fences table
+ * @hw_fences_tbl_cnt: number of elements in the hw-fence table
+ * @events: start address of hw fence debug events
+ * @total_events: total number of hw fence debug events supported
+ * @client_lock_tbl: pointer to the per-client locks table
+ * @client_lock_tbl_cnt: number of elements in the locks table
+ * @hw_fences_mem_desc: memory descriptor for the hw-fence table
+ * @clients_locks_mem_desc: memory descriptor for the locks table
+ * @ctrl_queue_mem_desc: memory descriptor for the ctrl queues
+ * @ctrl_queues: pointer to the ctrl queues
+ * @io_mem_base: pointer to the carved-out io memory
+ * @res: resources for the carved out memory
+ * @size: size of the carved-out memory
+ * @label: label for the carved-out memory (this is used by SVM to find the memory)
+ * @peer_name: peer name for this carved-out memory
+ * @rm_nb: hyp resource manager notifier
+ * @memparcel: memparcel for the allocated memory
+ * @used_mem_size: total memory size of global table, lock region, and ctrl and client queues
+ * @db_label: doorbell label
+ * @rx_dbl: handle to the Rx doorbell
+ * @debugfs_data: debugfs info
+ * @ipcc_reg_base: base for ipcc regs mapping
+ * @ipcc_io_mem: base for the ipcc io mem map
+ * @ipcc_size: size of the ipcc io mem mapping
+ * @protocol_id: ipcc protocol id used by this driver
+ * @ipcc_client_vid: ipcc client virtual-id for this driver
+ * @ipcc_client_pid: ipcc client physical-id for this driver
+ * @ipc_clients_table: table with the ipcc mapping for each client of this driver
+ * @qtime_reg_base: qtimer register base address
+ * @qtime_io_mem: qtimer io mem map
+ * @qtime_size: qtimer io mem map size
+ * @client_id_mask: bitmask for tracking registered client_ids
+ * @clients_register_lock: lock to synchronize clients registration and deregistration
+ * @clients: table with the handles of the registered clients; size is equal to clients_num
+ * @vm_ready: flag to indicate if vm has been initialized
+ * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized
+ */
+struct hw_fence_driver_data {
+
+	struct device *dev;
+	bool resources_ready;
+
+	/* Table & Queues info */
+	u32 hw_fence_table_entries;
+	u32 hw_fence_mem_fences_table_size;
+	u32 hw_fence_queue_entries;
+	/* ctrl queues */
+	u32 hw_fence_ctrl_queue_size;
+	u32 hw_fence_mem_ctrl_queues_size;
+	/* client queues */
+	struct hw_fence_client_queue_desc *hw_fence_client_queue_size;
+	struct hw_fence_client_type_desc *hw_fence_client_types;
+	u32 rxq_clients_num;
+	u32 clients_num;
+
+	/* HW Fences Table VA */
+	struct msm_hw_fence *hw_fences_tbl;
+	u32 hw_fences_tbl_cnt;
+
+	/* events */
+	struct msm_hw_fence_event *events;
+	u32 total_events;
+
+	/* Table with a Per-Client Lock */
+	u64 *client_lock_tbl;
+	u32 client_lock_tbl_cnt;
+
+	/* Memory Descriptors */
+	struct msm_hw_fence_mem_addr hw_fences_mem_desc;
+	struct msm_hw_fence_mem_addr clients_locks_mem_desc;
+	struct msm_hw_fence_mem_addr ctrl_queue_mem_desc;
+	struct msm_hw_fence_queue ctrl_queues[HW_FENCE_CTRL_QUEUES];
+
+	/* carved out memory */
+	void __iomem *io_mem_base;
+	struct resource res;
+	size_t size;
+	u32 label;
+	u32 peer_name;
+	struct notifier_block rm_nb;
+	u32 memparcel;
+	u32 used_mem_size;
+
+	/* doorbell */
+	u32 db_label;
+
+	/* VM virq */
+	void *rx_dbl;
+
+	/* debugfs */
+	struct msm_hw_fence_dbg_data debugfs_data;
+
+	/* ipcc regs */
+	phys_addr_t ipcc_reg_base;
+	void __iomem *ipcc_io_mem;
+	uint32_t ipcc_size;
+	u32 protocol_id;
+	u32 ipcc_client_vid;
+	u32 ipcc_client_pid;
+
+	/* table with mapping of ipc client for each hw-fence client */
+	struct hw_fence_client_ipc_map *ipc_clients_table;
+
+	/* qtime reg */
+	phys_addr_t qtime_reg_base;
+	void __iomem *qtime_io_mem;
+	uint32_t qtime_size;
+
+	/* synchronize client_ids registration and deregistration */
+	struct mutex clients_register_lock;
+
+	/* table with registered client handles */
+	struct msm_hw_fence_client **clients;
+
+	bool vm_ready;
+	/* state variables */
+	bool ipcc_dpu_initialized;
+};
+
+/**
+ * struct msm_hw_fence_queue_payload - hardware fence clients queues payload.
+ * @size: size of queue payload
+ * @type: type of queue payload
+ * @version: version of queue payload. High eight bits are for major and lower eight
+ *           bits are for minor version
+ * @ctxt_id: context id of the dma fence
+ * @seqno: sequence number of the dma fence
+ * @hash: fence hash
+ * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions
+ * @client_data: data passed from and returned to waiting client upon fence signaling
+ * @error: error code for this fence, fence controller receives this
+ *		  error from the signaling client through the tx queue and
+ *		  propagates the error to the waiting client through rx queue
+ * @timestamp_lo: low 32-bits of qtime of when the payload is written into the queue
+ * @timestamp_hi: high 32-bits of qtime of when the payload is written into the queue
+ * @reserve: reserved
+ */
+struct msm_hw_fence_queue_payload {
+	u32 size;
+	u16 type;
+	u16 version;
+	u64 ctxt_id;
+	u64 seqno;
+	u64 hash;
+	u64 flags;
+	u64 client_data;
+	u32 error;
+	u32 timestamp_lo;
+	u32 timestamp_hi;
+	u32 reserve;
+};
+
+/**
+ * struct msm_hw_fence_event - hardware fence ctl debug event
+ * @time: qtime when the event is logged
+ * @cpu: cpu id where the event is logged
+ * @data_cnt: count of valid data available in the data field
+ * @data: debug data logged by the event
+ */
+struct msm_hw_fence_event {
+	u64 time;
+	u32 cpu;
+	u32 data_cnt;
+	u32 data[HW_FENCE_EVENT_MAX_DATA];
+};
+
+/**
+ * struct msm_hw_fence - structure holding each hw fence data.
+ * @valid: field updated when a hw-fence is reserved. True if hw-fence is in use
+ * @error: field to hold a hw-fence error
+ * @ctx_id: context id
+ * @seq_id: sequence id
+ * @wait_client_mask: bitmask holding the waiting-clients of the fence
+ * @fence_allocator: field to indicate the client_id that reserved the fence
+ * @fence_signal_client: client_id of the client that signaled the fence
+ * @lock: this field is required to share information between the Driver & Driver ||
+ *        Driver & FenceCTL. Needs to be 64-bit atomic inter-processor lock.
+ * @flags: field to indicate the state of the fence
+ * @parent_list: list of indexes with the parents for a child-fence in a join-fence
+ * @parents_cnt: total number of parents for a child-fence in a join-fence
+ * @pending_child_cnt: children refcount for a parent-fence in a join-fence. Access must be atomic
+ *        or locked
+ * @fence_create_time: debug info with the create time timestamp
+ * @fence_trigger_time: debug info with the trigger time timestamp
+ * @fence_wait_time: debug info with the register-for-wait timestamp
+ * @debug_refcount: refcount used for debugging
+ * @client_data: array of data optionally passed from and returned to clients waiting on the fence
+ *               during fence signaling
+ */
+struct msm_hw_fence {
+	u32 valid;
+	u32 error;
+	u64 ctx_id;
+	u64 seq_id;
+	u64 wait_client_mask;
+	u32 fence_allocator;
+	u32 fence_signal_client;
+	u64 lock;	/* Datatype must be 64-bit. */
+	u64 flags;
+	u64 parent_list[MSM_HW_FENCE_MAX_JOIN_PARENTS];
+	u32 parents_cnt;
+	u32 pending_child_cnt;
+	u64 fence_create_time;
+	u64 fence_trigger_time;
+	u64 fence_wait_time;
+	u64 debug_refcount;
+	u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA];
+};
+
+int hw_fence_init(struct hw_fence_driver_data *drv_data);
+int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	struct msm_hw_fence_mem_addr *mem_descriptor);
+int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client);
+int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client);
+void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
+	 struct msm_hw_fence_client *hw_fence_client);
+void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client);
+int hw_fence_create(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	u64 context, u64 seqno, u64 *hash);
+int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	u64 context, u64 seqno);
+int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, u64 hash);
+int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data);
+int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash,
+	u64 client_data);
+int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
+	u64 flags, u64 client_data, u32 error, int queue_type);
+int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error);
+inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data);
+int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
+	struct msm_hw_fence_queue_payload *payload, int queue_type);
+int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue,
+	struct msm_hw_fence_queue_payload *payload);
+int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
+	struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
+	u64 seqno, u64 *hash, u64 client_data);
+struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	u64 context, u64 seqno, u64 *hash);
+enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id);
+
+#endif /* __HW_FENCE_DRV_INTERNAL_H */
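
HW_FENCE_PAYLOAD_REV packs the payload version into the 16-bit 'version' field of struct msm_hw_fence_queue_payload: major in the high byte, minor in the low byte. A short sketch, not part of this commit, of packing and unpacking; the helper names are illustrative:

#include <linux/types.h>
#include "hw_fence_drv_priv.h"

/* HW_FENCE_PAYLOAD_REV(1, 0) == 0x0100 */
static inline u16 hypothetical_pack_version(void)
{
	return HW_FENCE_PAYLOAD_REV(1, 0);
}

static inline void hypothetical_split_version(u16 version, u8 *major, u8 *minor)
{
	*major = version >> 8;		/* high eight bits */
	*minor = version & 0xFF;	/* low eight bits */
}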

+ 167 - 0
qcom/opensource/mm-drivers/hw_fence/include/hw_fence_drv_utils.h

@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __HW_FENCE_DRV_UTILS_H
+#define __HW_FENCE_DRV_UTILS_H
+
+/**
+ * HW_FENCE_MAX_CLIENT_TYPE_STATIC:
+ * Total number of client types without configurable number of sub-clients (GFX, DPU, VAL)
+ */
+#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 3
+
+/**
+ * HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE:
+ * Maximum number of client types with configurable number of sub-clients (e.g. IPE, VPU, IFE)
+ */
+#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 10
+
+/**
+ * HW_FENCE_MAX_STATIC_CLIENTS_INDEX:
+ * Maximum number of static clients, i.e. clients without configurable numbers of sub-clients
+ */
+#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IPE
+
+/**
+ * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory.
+ * HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues.
+ * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region.
+ * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table.
+ * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues.
+ * HW_FENCE_MEM_RESERVE_EVENTS_BUFF: Reserve memory for the debug events
+ */
+enum hw_fence_mem_reserve {
+	HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
+	HW_FENCE_MEM_RESERVE_LOCKS_REGION,
+	HW_FENCE_MEM_RESERVE_TABLE,
+	HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
+	HW_FENCE_MEM_RESERVE_EVENTS_BUFF
+};
+
+/**
+ * global_atomic_store() - Inter-processor lock
+ * @drv_data: hw fence driver data
+ * @lock: memory to lock
+ * @val: if true, api locks the memory, if false it unlocks the memory
+ */
+void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val);
+
+/**
+ * hw_fence_utils_init_virq() - Initialize doorbell (i.e. vIRQ) for SVM to HLOS signaling
+ * @drv_data: hw fence driver data
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data);
+
+/**
+ * hw_fence_utils_process_doorbell_mask() - Sends doorbell mask to process the signaled clients;
+ *                                          this API is only exported for simulation purposes.
+ * @drv_data: hw fence driver data.
+ * @db_flags: doorbell flag
+ */
+void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags);
+
+/**
+ * hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW
+ *                              Fence global table, locks and queues.
+ * @hw_fence_drv_data: hw fence driver data
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *hw_fence_drv_data);
+
+/**
+ * hw_fence_utils_reserve_mem() - Reserves memory from the carved-out memory pool.
+ * @drv_data: hw fence driver data.
+ * @type: memory reservation type.
+ * @phys: physical address of the carved-out memory pool
+ * @pa: virtual address of the reserved memory region
+ * @size: size of the reserved memory region
+ * @client_id: hw fence driver client id, used when reserving client queue memory
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
+	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id);
+
+/**
+ * hw_fence_utils_parse_dt_props() -  Init dt properties
+ * @drv_data: hw fence driver data
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data);
+
+/**
+ * hw_fence_utils_map_ipcc() -  Maps IPCC registers and enable signaling
+ * @drv_data: hw fence driver data
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data);
+
+/**
+ * hw_fence_utils_map_qtime() -  Maps qtime register
+ * @drv_data: hw fence driver data
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data);
+
+/**
+ * hw_fence_utils_cleanup_fence() -  Cleanup the hw-fence from a specified client
+ * @drv_data: hw fence driver data
+ * @hw_fence_client: client, for which the fence must be cleared
+ * @hw_fence: hw-fence to cleanup
+ * @hash: hash of the hw-fence to cleanup
+ * @reset_flags: flags to determine how to handle the reset
+ *
+ * Returns zero if success, otherwise returns negative error code.
+ */
+int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
+	u32 reset_flags);
+
+/**
+ * hw_fence_utils_fence_error_cb() - Invokes fence error callback registered by specified client
+ *
+ * @hw_fence_client: client, for which fence error callback must be invoked
+ * @ctxt_id: context id of the hw-fence
+ * @seqno: sequence number of the hw-fence
+ * @hash: hash of the hw-fence
+ * @flags: flags of the hw-fence
+ * @error: error of the hw-fence
+ *
+ * Returns zero if success, otherwise returns negative error code
+ */
+int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id,
+	u64 seqno, u64 hash, u64 flags, u32 error);
+
+/**
+ * hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver
+ *                                       from the client_id used externally
+ *
+ * Performs a 1-to-1 mapping for all client IDs less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX,
+ * otherwise consolidates client IDs of clients with configurable number of sub-clients. Fails if
+ * provided with client IDs for such clients when support for those clients is not configured in
+ * device-tree.
+ *
+ * @drv_data: hw fence driver data
+ * @client_id: external client_id to get internal client_id for
+ *
+ * Returns client_id < drv_data->clients_num if success, otherwise returns HW_FENCE_CLIENT_MAX
+ */
+enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
+	enum hw_fence_client_id client_id);
+
+/**
+ * hw_fence_utils_get_queues_num() - Returns number of client queues for the client_id.
+ *
+ * @drv_data: driver data
+ * @client_id: hw fence driver client id
+ *
+ * Returns: number of client queues
+ */
+int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id);
+
+#endif /* __HW_FENCE_DRV_UTILS_H */
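
global_atomic_store() is the 64-bit inter-processor try-lock that struct msm_hw_fence documents for its 'lock' field: val=true locks, val=false unlocks. A hedged sketch, not part of this commit, of the lock/modify/unlock discipline the API implies; the wrapper is an assumption, while the flag and lock semantics come from the headers above:

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"

/* Sketch only: serialize an update to a shared hw-fence entry. */
static void hypothetical_mark_signaled(struct hw_fence_driver_data *drv_data,
		struct msm_hw_fence *fence, u64 *lock)
{
	global_atomic_store(drv_data, lock, true);	/* lock */
	fence->flags |= MSM_HW_FENCE_FLAG_SIGNAL;
	global_atomic_store(drv_data, lock, false);	/* unlock */
}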

+ 222 - 0
qcom/opensource/mm-drivers/hw_fence/include/msm_hw_fence_synx_translation.h

@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __MSM_HW_FENCE_SYNX_TRANSLATION_H
+#define __MSM_HW_FENCE_SYNX_TRANSLATION_H
+
+#include <synx_api.h>
+
+extern bool hw_fence_driver_enable;
+
+#ifndef SYNX_HW_FENCE_CLIENT_START
+#define SYNX_HW_FENCE_CLIENT_START 1024
+#define SYNX_HW_FENCE_CLIENT_END 4096
+#define SYNX_MAX_SIGNAL_PER_CLIENT 64
+
+/**
+ * enum synx_hwfence_client_id : Unique identifier of the supported clients
+ *
+ * @SYNX_CLIENT_HW_FENCE_GFX_CTX0 : HW Fence GFX Client 0
+ * @SYNX_CLIENT_HW_FENCE_IPE_CTX0 : HW Fence IPE Client 0
+ * @SYNX_CLIENT_HW_FENCE_VID_CTX0 : HW Fence Video Client 0
+ * @SYNX_CLIENT_HW_FENCE_DPU0_CTL0 : HW Fence DPU0 Client 0
+ * @SYNX_CLIENT_HW_FENCE_DPU1_CTL0 : HW Fence DPU1 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE0_CTX0 : HW Fence IFE0 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE1_CTX0 : HW Fence IFE1 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE2_CTX0 : HW Fence IFE2 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE3_CTX0 : HW Fence IFE3 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE4_CTX0 : HW Fence IFE4 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE5_CTX0 : HW Fence IFE5 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE6_CTX0 : HW Fence IFE6 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE7_CTX0 : HW Fence IFE7 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE8_CTX0 : HW Fence IFE8 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE9_CTX0 : HW Fence IFE9 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE10_CTX0 : HW Fence IFE10 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE11_CTX0 : HW Fence IFE11 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE12_CTX0 : HW Fence IFE12 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE13_CTX0 : HW Fence IFE13 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE14_CTX0 : HW Fence IFE14 Client 0
+ * @SYNX_CLIENT_HW_FENCE_IFE15_CTX0 : HW Fence IFE15 Client 0
+ */
+enum synx_hwfence_client_id {
+	SYNX_CLIENT_HW_FENCE_GFX_CTX0 = SYNX_HW_FENCE_CLIENT_START,
+	SYNX_CLIENT_HW_FENCE_IPE_CTX0 = SYNX_CLIENT_HW_FENCE_GFX_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_VID_CTX0 = SYNX_CLIENT_HW_FENCE_IPE_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_DPU0_CTL0 = SYNX_CLIENT_HW_FENCE_VID_CTX0 + SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_DPU1_CTL0 = SYNX_CLIENT_HW_FENCE_DPU0_CTL0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE0_CTX0 = SYNX_CLIENT_HW_FENCE_DPU1_CTL0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE1_CTX0 = SYNX_CLIENT_HW_FENCE_IFE0_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE2_CTX0 = SYNX_CLIENT_HW_FENCE_IFE1_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE3_CTX0 = SYNX_CLIENT_HW_FENCE_IFE2_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE4_CTX0 = SYNX_CLIENT_HW_FENCE_IFE3_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE5_CTX0 = SYNX_CLIENT_HW_FENCE_IFE4_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE6_CTX0 = SYNX_CLIENT_HW_FENCE_IFE5_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE7_CTX0 = SYNX_CLIENT_HW_FENCE_IFE6_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE8_CTX0 = SYNX_CLIENT_HW_FENCE_IFE7_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE9_CTX0 = SYNX_CLIENT_HW_FENCE_IFE8_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE10_CTX0 = SYNX_CLIENT_HW_FENCE_IFE9_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE11_CTX0 = SYNX_CLIENT_HW_FENCE_IFE10_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE12_CTX0 = SYNX_CLIENT_HW_FENCE_IFE11_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE13_CTX0 = SYNX_CLIENT_HW_FENCE_IFE12_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE14_CTX0 = SYNX_CLIENT_HW_FENCE_IFE13_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_IFE15_CTX0 = SYNX_CLIENT_HW_FENCE_IFE14_CTX0 +
+		SYNX_MAX_SIGNAL_PER_CLIENT,
+	SYNX_CLIENT_HW_FENCE_MAX = SYNX_HW_FENCE_CLIENT_END,
+};
+#endif
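+
+/*
+ * Editorial illustration (not part of this commit): per the encoding above,
+ * each hw-fence client core owns a contiguous block of
+ * SYNX_MAX_SIGNAL_PER_CLIENT ids, so an id in
+ * [SYNX_HW_FENCE_CLIENT_START, SYNX_HW_FENCE_CLIENT_END) decomposes as:
+ *
+ *	block  = (id - SYNX_HW_FENCE_CLIENT_START) / SYNX_MAX_SIGNAL_PER_CLIENT
+ *	signal = (id - SYNX_HW_FENCE_CLIENT_START) % SYNX_MAX_SIGNAL_PER_CLIENT
+ *
+ * e.g. SYNX_CLIENT_HW_FENCE_VID_CTX0 = 1024 + 2 * 64 = 1152 (block 2, signal 0).
+ */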
+
+#if IS_ENABLED(CONFIG_QTI_HW_FENCE)
+/**
+ * synx_hwfence_initialize - Initializes a new client session
+ *
+ * @param params : Pointer to session init params
+ *
+ * @return Client session pointer on success. NULL or error in case of failure.
+ */
+struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params);
+
+/**
+ * synx_hwfence_uninitialize - Destroys the client session
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ */
+int synx_hwfence_uninitialize(struct synx_session *session);
+
+/**
+ * synx_hwfence_create - Creates a synx object
+ *
+ *  Creates a new synx object and returns its handle to the client.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Pointer to create params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_INVALID will be returned if the params are invalid.
+ * -SYNX_NOMEM will be returned if the kernel can't allocate space for the
+ * synx object.
+ */
+int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params);
+
+/**
+ * synx_hwfence_release - Release the synx object
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_hwfence_release(struct synx_session *session, u32 h_synx);
+
+/**
+ * synx_hwfence_signal - Signals a synx object with the status argument.
+ *
+ * This function signals the synx object referenced by h_synx
+ * and invokes any externally bound synx objects.
+ * The status parameter indicates whether the entity
+ * performing the signaling wants to convey an error case or a success case.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle
+ * @param status  : Status of signaling.
+ *                  Clients can send custom signaling status
+ *                  beyond SYNX_STATE_SIGNALED_MAX.
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status);
+
+/**
+ * synx_hwfence_recover - Recover any possible handle leaks
+ *
+ * This function should be called on HW hang/reset to
+ * recover the shared Synx handles. It cleans up the
+ * Synx handles held by the reset HW core and avoids
+ * potential resource leaks.
+ *
+ * The function does not destroy the session; it only
+ * recovers the synx handles belonging to the session.
+ * The Synx session remains active, and clients must
+ * destroy the session explicitly through the
+ * synx_uninitialize API.
+ *
+ * @param id : Client ID of core to recover
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_hwfence_recover(enum synx_client_id id);
+
+/**
+ * synx_hwfence_import - Imports (looks up) synx object from given handle/fence
+ *
+ * Import subscribes the client session for notification on signal
+ * of handles/fences.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Pointer to import params
+ *
+ * @return SYNX_SUCCESS upon success, -SYNX_INVALID if the synx object is in a bad state
+ */
+int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params);
+
+#else /* CONFIG_QTI_HW_FENCE */
+static inline struct synx_session *synx_hwfence_initialize(
+	struct synx_initialization_params *params)
+{
+	return ERR_PTR(-SYNX_INVALID);
+}
+
+static inline int synx_hwfence_uninitialize(struct synx_session *session)
+{
+	return -SYNX_INVALID;
+}
+
+static inline int synx_hwfence_create(struct synx_session *session,
+	struct synx_create_params *params)
+{
+	return -SYNX_INVALID;
+}
+
+static inline int synx_hwfence_release(struct synx_session *session, u32 h_synx)
+{
+	return -SYNX_INVALID;
+}
+
+static inline int synx_hwfence_signal(struct synx_session *session, u32 h_synx,
+	enum synx_signal_status status)
+{
+	return -SYNX_INVALID;
+}
+
+static inline int synx_hwfence_recover(enum synx_client_id id)
+{
+	return -SYNX_INVALID;
+}
+
+static inline int synx_hwfence_import(struct synx_session *session,
+	struct synx_import_params *params)
+{
+	return -SYNX_INVALID;
+}
+
+#endif /* CONFIG_QTI_HW_FENCE */
+#endif /* __MSM_HW_FENCE_SYNX_TRANSLATION_H */
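
A minimal caller-side sketch of the session API above (editorial, not part of the commit: the field names on the synx param structs and the SYNX_STATE_SIGNALED_SUCCESS status follow synx_api.h conventions and are assumptions here; error handling is trimmed):

	#include <linux/err.h>
	#include <synx_api.h>
	#include "msm_hw_fence_synx_translation.h"

	static int example_hwfence_session(void)
	{
		struct synx_initialization_params init_params = {0};
		struct synx_create_params create_params = {0};
		struct synx_session *session;
		u32 h_synx = 0;
		int ret;

		init_params.name = "hwfence-example";
		init_params.id = (enum synx_client_id)SYNX_CLIENT_HW_FENCE_GFX_CTX0;

		/* open a session for the GFX hw-fence client */
		session = synx_hwfence_initialize(&init_params);
		if (IS_ERR_OR_NULL(session))
			return -EINVAL;

		create_params.name = "hwfence-example-obj";
		create_params.h_synx = &h_synx;

		ret = synx_hwfence_create(session, &create_params);
		if (ret != SYNX_SUCCESS)
			goto uninit;

		/* producer path: signal the object, then drop the local ref */
		ret = synx_hwfence_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);
		synx_hwfence_release(session, h_synx);
	uninit:
		synx_hwfence_uninitialize(session);
		return ret;
	}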

+ 1259 - 0
qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_debug.c

@@ -0,0 +1,1259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+
+#include "hw_fence_drv_priv.h"
+#include "hw_fence_drv_debug.h"
+#include "hw_fence_drv_ipc.h"
+#include "hw_fence_drv_utils.h"
+
+#define HW_FENCE_DEBUG_MAX_LOOPS 200
+
+#define HFENCE_TBL_MSG \
+	"[%d]hfence[%llu] v:%d err:%lu ctx:%llu seq:%llu wait:0x%llx alloc:%d f:0x%llx child_cnt:%d" \
+	"%s ct:%llu tt:%llu wt:%llu\n"
+
+/* each hwfence parent includes one "32-bit" element + "," separator */
+#define HW_FENCE_MAX_PARENTS_SUBLIST_DUMP (MSM_HW_FENCE_MAX_JOIN_PARENTS * 9)
+#define HW_FENCE_MAX_PARENTS_DUMP (sizeof("parent_list[] ") + HW_FENCE_MAX_PARENTS_SUBLIST_DUMP)
+
+/* event dump data includes one "32-bit" element + "|" separator */
+#define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9)
+
+#define HFENCE_EVT_MSG "[%d][cpu:%d][%lu] data[%d]:%s\n"
+
+u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK;
+
+/**
+ * struct client_data - Structure holding the data of the debug clients.
+ *
+ * @client_id: client id.
+ * @dma_context: context id to create the dma-fences for the client.
+ * @seqno_cnt: sequence number, this is a counter to simulate the seqno for debugging.
+ * @client_handle: handle for the client, this is returned by the hw-fence driver after
+ *                 a successful registration of the client.
+ * @mem_descriptor: memory descriptor for the client-queues. This is populated by the hw-fence
+ *                 driver after a successful registration of the client.
+ * @list: client node.
+ */
+struct client_data {
+	int client_id;
+	u64 dma_context;
+	u64 seqno_cnt;
+	void *client_handle;
+	struct msm_hw_fence_mem_addr mem_descriptor;
+	struct list_head list;
+};
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int _get_debugfs_input_client(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos,
+	struct hw_fence_driver_data **drv_data)
+{
+	char buf[10];
+	int client_id;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", !file);
+		return -EINVAL;
+	}
+	*drv_data = file->private_data;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	if (kstrtouint(buf, 0, &client_id))
+		return -EFAULT;
+
+	if (client_id < HW_FENCE_CLIENT_ID_CTX0 || client_id >= HW_FENCE_CLIENT_MAX) {
+		HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id,
+			HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_MAX);
+		return -EINVAL;
+	}
+
+	return client_id;
+}
+
+static int _debugfs_ipcc_trigger(struct file *file, const char __user *user_buf,
+	size_t count, loff_t *ppos, u32 tx_client, u32 rx_client)
+{
+	struct hw_fence_driver_data *drv_data;
+	int client_id, signal_id;
+
+	client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data);
+	if (client_id < 0)
+		return -EINVAL;
+
+	/* Get signal-id that hw-fence driver would trigger for this client */
+	signal_id = hw_fence_ipcc_get_signal_id(drv_data, client_id);
+	if (signal_id < 0)
+		return -EINVAL;
+
+	HWFNC_DBG_IRQ("client_id:%d ipcc write tx_client:%d rx_client:%d signal_id:%d qtime:%llu\n",
+		client_id, tx_client, rx_client, signal_id, hw_fence_get_qtime(drv_data));
+	hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id);
+
+	return count;
+}
+
+/**
+ * hw_fence_dbg_ipcc_write() - debugfs write to trigger an ipcc irq.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal
+ * from apps to apps for that client id.
+ */
+static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	struct hw_fence_driver_data *drv_data = file->private_data;
+
+	return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid,
+		drv_data->ipcc_client_vid);
+}
+
+/**
+ * hw_fence_dbg_ipcc_dpu_write() - debugfs write to trigger an ipcc irq to dpu core.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs receives as parameter a hw-fence driver client_id, and triggers an ipcc signal
+ * from apps to dpu for that client id.
+ */
+static ssize_t hw_fence_dbg_ipcc_dpu_write(struct file *file, const char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	struct hw_fence_driver_data *drv_data = file->private_data;
+
+	return _debugfs_ipcc_trigger(file, user_buf, count, ppos, drv_data->ipcc_client_pid,
+		hw_fence_ipcc_get_client_virt_id(drv_data, HW_FENCE_CLIENT_ID_CTL0));
+
+}
+
+static const struct file_operations hw_fence_dbg_ipcc_dpu_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_ipcc_dpu_write,
+};
+
+static const struct file_operations hw_fence_dbg_ipcc_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_ipcc_write,
+};
+
+static struct client_data *_get_client_node(struct hw_fence_driver_data *drv_data, u32 client_id)
+{
+	struct client_data *node = NULL;
+	bool found = false;
+
+	mutex_lock(&drv_data->debugfs_data.clients_list_lock);
+	list_for_each_entry(node, &drv_data->debugfs_data.clients_list, list) {
+		if (node->client_id == client_id) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&drv_data->debugfs_data.clients_list_lock);
+
+	return found ? node : NULL;
+}
+
+/**
+ * hw_fence_dbg_reset_client_wr() - debugfs write to trigger reset in a debug hw-fence client.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs receives as parameter a hw-fence driver client_id, and triggers a reset for
+ * this client. Note that this operation is only performed on hw-fence clients created through
+ * the debug framework.
+ */
+static ssize_t hw_fence_dbg_reset_client_wr(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	int client_id, ret;
+	struct client_data *client_info;
+	struct hw_fence_driver_data *drv_data;
+
+	client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data);
+	if (client_id < 0)
+		return -EINVAL;
+
+	client_info = _get_client_node(drv_data, client_id);
+	if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) {
+		HWFNC_ERR("client:%d not registered as debug client\n", client_id);
+		return -EINVAL;
+	}
+
+	HWFNC_DBG_H("resetting client: %d\n", client_id);
+	ret = msm_hw_fence_reset_client(client_info->client_handle, 0);
+	if (ret)
+		HWFNC_ERR("failed to reset client:%d\n", client_id);
+
+	return count;
+}
+
+/**
+ * hw_fence_dbg_register_clients_wr() - debugfs write to register a client with the hw-fence
+ *                                      driver for debugging.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs receives as parameter a hw-fence driver client_id to register for debug.
+ * Note that if the client_id received was already registered by any other driver, the
+ * registration here will fail.
+ */
+static ssize_t hw_fence_dbg_register_clients_wr(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	int client_id;
+	struct client_data *client_info;
+	struct hw_fence_driver_data *drv_data;
+
+	client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data);
+	if (client_id < 0)
+		return -EINVAL;
+
+	/* we cannot create same debug client twice */
+	if (_get_client_node(drv_data, client_id)) {
+		HWFNC_ERR("client:%d already registered as debug client\n", client_id);
+		return -EINVAL;
+	}
+
+	client_info = kzalloc(sizeof(*client_info), GFP_KERNEL);
+	if (!client_info)
+		return -ENOMEM;
+
+	HWFNC_DBG_H("register client %d\n", client_id);
+	client_info->client_handle = msm_hw_fence_register(client_id,
+		&client_info->mem_descriptor);
+	if (IS_ERR_OR_NULL(client_info->client_handle)) {
+		HWFNC_ERR("error registering as debug client:%d\n", client_id);
+		client_info->client_handle = NULL;
+		return -EFAULT;
+	}
+
+	client_info->dma_context = dma_fence_context_alloc(1);
+	client_info->client_id = client_id;
+
+	mutex_lock(&drv_data->debugfs_data.clients_list_lock);
+	list_add(&client_info->list, &drv_data->debugfs_data.clients_list);
+	mutex_unlock(&drv_data->debugfs_data.clients_list_lock);
+
+	return count;
+}
+
+/**
+ * hw_fence_dbg_tx_and_signal_clients_wr() - debugfs write to simulate the lifecycle of a hw-fence.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs receives as parameter the number of iterations that the simulation will run,
+ * each iteration will: create, signal, register-for-signal and destroy a hw-fence.
+ * Note that this simulation relies in the user first registering the clients as debug-clients
+ * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the clients are not previously
+ * registered as debug-clients, this simulation will fail and won't run.
+ */
+static ssize_t hw_fence_dbg_tx_and_signal_clients_wr(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	u32 input_data, client_id_src, client_id_dst, tx_client, rx_client;
+	struct client_data *client_info_src, *client_info_dst;
+	struct hw_fence_driver_data *drv_data;
+	struct msm_hw_fence_client *hw_fence_client, *hw_fence_client_dst;
+	u64 context, seqno, hash;
+	char buf[10];
+	int signal_id, ret;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", file);
+		return -EINVAL;
+	}
+	drv_data = file->private_data;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	if (kstrtouint(buf, 0, &input_data))
+		return -EFAULT;
+
+	if (!input_data) {
+		HWFNC_ERR("won't do anything, write a value greater than 0 to start\n");
+		return 0;
+	} else if (input_data > HW_FENCE_DEBUG_MAX_LOOPS) {
+		HWFNC_ERR("requested loops:%d exceed max:%d, setting max\n", input_data,
+			HW_FENCE_DEBUG_MAX_LOOPS);
+		input_data = HW_FENCE_DEBUG_MAX_LOOPS;
+	}
+
+	client_id_src = HW_FENCE_CLIENT_ID_CTL0;
+	client_id_dst = HW_FENCE_CLIENT_ID_CTL1;
+
+	client_info_src = _get_client_node(drv_data, client_id_src);
+	client_info_dst = _get_client_node(drv_data, client_id_dst);
+
+	if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) ||
+			!client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) {
+		/* Make sure we registered this client through debugfs */
+		HWFNC_ERR("client_id_src:%d or client_id_dst:%d not registered as debug client!\n",
+			client_id_src, client_id_dst);
+		return -EINVAL;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle;
+	hw_fence_client_dst = (struct msm_hw_fence_client *)client_info_dst->client_handle;
+
+	while (drv_data->debugfs_data.create_hw_fences && input_data > 0) {
+
+		/***********************************************************/
+		/***** SRC CLIENT - CREATE HW FENCE & TX QUEUE UPDATE ******/
+		/***********************************************************/
+
+		/* we will use the context and the seqno of the source client */
+		context = client_info_src->dma_context;
+		seqno = client_info_src->seqno_cnt;
+
+		/* linear increment of the seqno for the src client*/
+		client_info_src->seqno_cnt++;
+
+		/* Create hw fence for src client */
+		ret = hw_fence_create(drv_data, hw_fence_client, context, seqno, &hash);
+		if (ret) {
+			HWFNC_ERR("Error creating HW fence\n");
+			goto exit;
+		}
+
+		/* Write to Tx queue */
+		hw_fence_update_queue(drv_data, hw_fence_client, context, seqno, hash,
+			0, 0, 0, HW_FENCE_TX_QUEUE - 1); /* no flags and no error */
+
+		/**********************************************/
+		/***** DST CLIENT - REGISTER WAIT CLIENT ******/
+		/**********************************************/
+		/* use same context and seqno that src client used to create fence */
+		ret = hw_fence_register_wait_client(drv_data, NULL, hw_fence_client_dst, context,
+			seqno, &hash, 0);
+		if (ret) {
+			HWFNC_ERR("failed to register for wait\n");
+			return -EINVAL;
+		}
+
+		/*********************************************/
+		/***** SRC CLIENT - TRIGGER IPCC SIGNAL ******/
+		/*********************************************/
+
+		/* AFTER THIS IS WHEN SVM WILL GET CALLED AND WILL PROCESS SRC AND DST CLIENTS */
+
+		/* Trigger IPCC for SVM to read the queue */
+
+		/* Get signal-id that hw-fence driver would trigger for this client */
+		signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id;
+		if (signal_id < 0)
+			return -EINVAL;
+
+		/*  Write to ipcc to trigger the irq */
+		tx_client = drv_data->ipcc_client_pid;
+		rx_client = drv_data->ipcc_client_vid;
+		HWFNC_DBG_IRQ("client:%d tx_client:%d rx_client:%d signal:%d delay:%d in_data%d\n",
+			client_id_src, tx_client, rx_client, signal_id,
+			drv_data->debugfs_data.hw_fence_sim_release_delay, input_data);
+
+		hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id);
+
+		/********************************************/
+		/******** WAIT ******************************/
+		/********************************************/
+
+		/* wait between iterations */
+		usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay,
+			(drv_data->debugfs_data.hw_fence_sim_release_delay + 5));
+
+		/******************************************/
+		/***** SRC CLIENT - CLEANUP HW FENCE ******/
+		/******************************************/
+
+		/* cleanup hw fence for src client */
+		ret = hw_fence_destroy(drv_data, hw_fence_client, context, seqno);
+		if (ret) {
+			HWFNC_ERR("Error destroying HW fence\n");
+			goto exit;
+		}
+
+		input_data--;
+	} /* LOOP.. */
+
+exit:
+	return count;
+}
+
+/**
+ * hw_fence_dbg_create_wr() - debugfs write to simulate the creation of a hw-fence.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs receives as parameter the client-id, for which the hw-fence will be created.
+ * Note that this simulation relies on the user first registering the client as a debug-client
+ * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the client is not previously
+ * registered as debug-client, this simulation will fail and won't run.
+ */
+static ssize_t hw_fence_dbg_create_wr(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct msm_hw_fence_create_params params;
+	struct hw_fence_driver_data *drv_data;
+	struct client_data *client_info;
+	struct hw_dma_fence *dma_fence;
+	spinlock_t *fence_lock;
+	static u64 hw_fence_dbg_seqno = 1;
+	int client_id, ret;
+	u64 hash;
+
+	client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data);
+	if (client_id < 0)
+		return -EINVAL;
+
+	client_info = _get_client_node(drv_data, client_id);
+	if (!client_info || IS_ERR_OR_NULL(client_info->client_handle)) {
+		HWFNC_ERR("client:%d not registered as debug client\n", client_id);
+		return -EINVAL;
+	}
+
+	/* create debug dma_fence */
+	fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
+	if (!fence_lock)
+		return -ENOMEM;
+
+	dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL);
+	if (!dma_fence) {
+		kfree(fence_lock);
+		return -ENOMEM;
+	}
+
+	snprintf(dma_fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu",
+		client_id, client_info->dma_context, hw_fence_dbg_seqno);
+
+	spin_lock_init(fence_lock);
+	dma_fence_init(&dma_fence->base, &hw_fence_dbg_ops, fence_lock,
+		client_info->dma_context, hw_fence_dbg_seqno);
+
+	HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", client_id,
+		client_info->dma_context, hw_fence_dbg_seqno);
+	params.fence = &dma_fence->base;
+	params.handle = &hash;
+	ret = msm_hw_fence_create(client_info->client_handle, &params);
+	if (ret) {
+		HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n",
+			client_id, client_info->dma_context, hw_fence_dbg_seqno);
+		dma_fence_put(&dma_fence->base);
+		return -EINVAL;
+	}
+	hw_fence_dbg_seqno++;
+
+	/* keep handle in dma_fence, to destroy hw-fence during release */
+	dma_fence->client_handle = client_info->client_handle;
+
+	return count;
+}
+
+static void _dump_fence_helper(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence,
+	char *parents_dump, u64 hash, u32 count)
+{
+	char sublist[HW_FENCE_MAX_PARENTS_SUBLIST_DUMP];
+	u32 parents_cnt;
+	int i, len = 0;
+
+	if (!hw_fence || !parents_dump) {
+		HWFNC_ERR("invalid params hw_fence:0x%pK parents_dump:0x%pK\n", hw_fence,
+			parents_dump);
+		return;
+	}
+
+	memset(parents_dump, 0, sizeof(char) * HW_FENCE_MAX_PARENTS_DUMP);
+	if (hw_fence->parents_cnt) {
+		if (hw_fence->parents_cnt > MSM_HW_FENCE_MAX_JOIN_PARENTS) {
+			HWFNC_ERR("hfence[%d] has invalid parents_cnt:%d greater than max:%d\n",
+				hash, hw_fence->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS);
+			parents_cnt = MSM_HW_FENCE_MAX_JOIN_PARENTS;
+		} else {
+			parents_cnt = hw_fence->parents_cnt;
+		}
+
+		memset(sublist, 0, sizeof(sublist));
+		for (i = 0; i < parents_cnt; i++)
+			len += scnprintf(sublist + len, HW_FENCE_MAX_PARENTS_SUBLIST_DUMP - len,
+				"%lu,", hw_fence->parent_list[i]);
+		scnprintf(parents_dump, HW_FENCE_MAX_PARENTS_DUMP, " p:[%s]", sublist);
+	}
+
+	HWFNC_DBG_DUMP(prio, HFENCE_TBL_MSG,
+		count, hash, hw_fence->valid, hw_fence->error, hw_fence->ctx_id, hw_fence->seq_id,
+		hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags,
+		hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time,
+		hw_fence->fence_trigger_time, hw_fence->fence_wait_time);
+}
+
+void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash,
+	u32 count)
+{
+	char parents_dump[HW_FENCE_MAX_PARENTS_DUMP];
+
+	return _dump_fence_helper(prio, hw_fence, parents_dump, hash, count);
+}
+
+static inline int _dump_fence(struct msm_hw_fence *hw_fence, char *buf, int len, int max_size,
+		u32 index, u32 cnt)
+{
+	int ret;
+	char parents_dump[HW_FENCE_MAX_PARENTS_DUMP];
+
+	_dump_fence_helper(HW_FENCE_INFO, hw_fence, parents_dump, index, cnt);
+
+	ret = scnprintf(buf + len, max_size - len, HFENCE_TBL_MSG,
+		cnt, (u64)index, hw_fence->valid, hw_fence->error, hw_fence->ctx_id,
+		hw_fence->seq_id,
+		hw_fence->wait_client_mask, hw_fence->fence_allocator, hw_fence->flags,
+		hw_fence->pending_child_cnt, parents_dump, hw_fence->fence_create_time,
+		hw_fence->fence_trigger_time, hw_fence->fence_wait_time);
+
+	return ret;
+}
+
+void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data)
+{
+	u32 i, cnt = 0;
+	struct msm_hw_fence *hw_fence;
+
+	for (i = 0; i < drv_data->hw_fences_tbl_cnt; i++) {
+		hw_fence = &drv_data->hw_fences_tbl[i];
+		if (!hw_fence->valid)
+			continue;
+		hw_fence_debug_dump_fence(prio, hw_fence, i, cnt);
+		cnt++;
+	}
+}
+
+static int dump_single_entry(struct hw_fence_driver_data *drv_data, char *buf, u32 *index,
+	int max_size)
+{
+	struct msm_hw_fence *hw_fence;
+	u64 context, seqno, hash = 0;
+	int len = 0;
+
+	context = drv_data->debugfs_data.context_rd;
+	seqno = drv_data->debugfs_data.seqno_rd;
+
+	hw_fence = msm_hw_fence_find(drv_data, NULL, context, seqno, &hash);
+	if (!hw_fence) {
+		HWFNC_ERR("no valid hfence found for context:%lu seqno:%lu hash:%lu",
+				context, seqno, hash);
+		len = scnprintf(buf + len, max_size - len,
+			"no valid hfence found for context:%lu seqno:%lu hash:%lu\n",
+			context, seqno, hash);
+
+		goto exit;
+	}
+
+	len = _dump_fence(hw_fence, buf, len, max_size, hash, 0);
+
+exit:
+	/* move idx to end of table to stop the dump */
+	*index = drv_data->hw_fences_tbl_cnt;
+
+	return len;
+}
+
+static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32 *index,
+	u32 *cnt, int max_size, int entry_size)
+{
+	struct msm_hw_fence *hw_fence;
+	int len = 0;
+
+	/*
+	 * read each entry before advancing, so index 0 is included and the
+	 * table is never read past hw_fences_tbl_cnt
+	 */
+	while ((*index < drv_data->hw_fences_tbl_cnt) && (len < (max_size - entry_size))) {
+		hw_fence = &drv_data->hw_fences_tbl[*index];
+
+		if (!hw_fence->valid) {
+			(*index)++;
+			continue;
+		}
+
+		len += _dump_fence(hw_fence, buf, len, max_size, *index, *cnt);
+		(*cnt)++;
+		(*index)++;
+	}
+
+	return len;
+}
+
+static void _find_earliest_event(struct hw_fence_driver_data *drv_data, u32 *start_index,
+	u64 *start_time)
+{
+	u32 i;
+
+	if (!start_index || !start_time) {
+		HWFNC_ERR("invalid params start_index:0x%pK start_time:0x%pK\n", start_index,
+			start_time);
+		return;
+	}
+
+	mb(); /* make sure data is ready before read */
+	for (i = 0; i < drv_data->total_events; i++) {
+		u64 time = drv_data->events[i].time;
+
+		if (time && (!*start_time || time < *start_time)) {
+			*start_time = time;
+			*start_index = i;
+		}
+	}
+}
+
+static void _dump_event(enum hw_fence_drv_prio prio, struct msm_hw_fence_event *event,
+	char *data, u32 index)
+{
+	u32 data_cnt;
+	int i, len = 0;
+
+	if (!event || !data) {
+		HWFNC_ERR("invalid params event:0x%pK data:0x%pK\n", event, data);
+		return;
+	}
+
+	memset(data, 0, sizeof(char) * HW_FENCE_MAX_DATA_PER_EVENT_DUMP);
+	if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) {
+		HWFNC_ERR("event[%d] has invalid data_cnt:%lu greater than max_data_cnt:%lu\n",
+			index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA);
+		data_cnt = HW_FENCE_EVENT_MAX_DATA;
+	} else {
+		data_cnt = event->data_cnt;
+	}
+
+	for (i = 0; i < data_cnt; i++)
+		len += scnprintf(data + len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - len,
+			"%lx|", event->data[i]);
+
+	HWFNC_DBG_DUMP(prio, HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data);
+}
+
+void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data)
+{
+	char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP];
+	u32 start_index = 0;
+	u64 start_time = 0;
+	int i;
+
+	if (!drv_data->events) {
+		HWFNC_ERR("events not supported\n");
+		return;
+	}
+
+	_find_earliest_event(drv_data, &start_index, &start_time);
+	for (i = start_index; i < drv_data->total_events && drv_data->events[i].time; i++)
+		_dump_event(prio, &drv_data->events[i], data, i);
+	for (i = 0; i < start_index; i++)
+		_dump_event(prio, &drv_data->events[i], data, i);
+}
+
+/**
+ * hw_fence_dbg_dump_events_rd() - debugfs read to dump the fctl events.
+ * @file: file handler.
+ * @user_buf: user buffer content for debugfs.
+ * @user_buf_size: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ */
+static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_buf,
+	size_t user_buf_size, loff_t *ppos)
+{
+	struct hw_fence_driver_data *drv_data;
+	u32 entry_size = sizeof(struct msm_hw_fence_event), max_size = SZ_4K;
+	char *buf = NULL;
+	int len = 0;
+	static u64 start_time;
+	static int index, start_index;
+	static bool wraparound;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", file);
+		return -EINVAL;
+	}
+	drv_data = file->private_data;
+
+	if (!drv_data->events) {
+		HWFNC_ERR("events not supported\n");
+		return -EINVAL;
+	}
+
+	if (wraparound && index >= start_index) {
+		HWFNC_DBG_H("no more data index:%d total_events:%d\n", index,
+			drv_data->total_events);
+		start_time = 0;
+		index = 0;
+		wraparound = false;
+		return 0;
+	}
+
+	if (user_buf_size < entry_size) {
+		HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size,
+			entry_size);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(max_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* find index of earliest event */
+	if (!start_time) {
+		_find_earliest_event(drv_data, &start_index, &start_time);
+		index = start_index;
+		HWFNC_DBG_H("events:0x%pK start_index:%d start_time:%llu total_events:%d\n",
+			drv_data->events, start_index, start_time, drv_data->total_events);
+	}
+
+	HWFNC_DBG_H("++ dump_events index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data));
+	while ((!wraparound || index < start_index) && len < (max_size - entry_size)) {
+		char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP];
+
+		if (drv_data->events[index].time) {
+			_dump_event(HW_FENCE_INFO, &drv_data->events[index], data, index);
+			len += scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index,
+				drv_data->events[index].cpu, drv_data->events[index].time,
+				drv_data->events[index].data_cnt, data);
+		}
+
+		index++;
+		if (index >= drv_data->total_events) {
+			index = 0;
+			wraparound = true;
+		}
+	}
+	HWFNC_DBG_H("-- dump_events: index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data));
+
+	if (len <= 0 || len > user_buf_size) {
+		HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size);
+		len = 0;
+		goto exit;
+	}
+
+	if (copy_to_user(user_buf, buf, len)) {
+		HWFNC_ERR("failed to copy to user!\n");
+		len = -EFAULT;
+		goto exit;
+	}
+	*ppos += len;
+exit:
+	kfree(buf);
+	return len;
+}
+
+static void _dump_queue(enum hw_fence_drv_prio prio, struct msm_hw_fence_client *hw_fence_client,
+	int queue_type)
+{
+	struct msm_hw_fence_queue *queue;
+	struct msm_hw_fence_hfi_queue_header *hfi_header;
+	struct msm_hw_fence_queue_payload *payload;
+	u64 timestamp;
+	u32 *read_ptr, queue_entries;
+	int i;
+
+	queue = &hw_fence_client->queues[queue_type - 1];
+
+	if ((queue_type > hw_fence_client->queues_num) || !queue || !queue->va_header
+			|| !queue->va_queue) {
+		HWFNC_ERR("Cannot dump client:%d q_type:%s q_ptr:0x%pK q_header:0x%pK q_va:0x%pK\n",
+			hw_fence_client->client_id,
+			(queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE",
+			queue, queue ? queue->va_header : NULL, queue ? queue->va_queue : NULL);
+		return;
+	}
+	hfi_header = (struct msm_hw_fence_hfi_queue_header *)queue->va_header;
+
+	mb(); /* make sure data is ready before read */
+	HWFNC_DBG_DUMP(prio, "%s va:0x%pK rd_idx:%lu wr_idx:%lu tx_wm:%lu q_size_bytes:%lu\n",
+		(queue_type == HW_FENCE_TX_QUEUE) ? "TX QUEUE" : "RX QUEUE", queue->va_queue,
+		hfi_header->read_index, hfi_header->write_index, hfi_header->tx_wm,
+		queue->q_size_bytes);
+	queue_entries = queue->q_size_bytes / HW_FENCE_CLIENT_QUEUE_PAYLOAD;
+
+	for (i = 0; i < queue_entries; i++) {
+		read_ptr = ((u32 *)queue->va_queue +
+			(i * (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32))));
+		payload = (struct msm_hw_fence_queue_payload *)read_ptr;
+		timestamp = (u64)payload->timestamp_lo | ((u64)payload->timestamp_hi << 32);
+
+		HWFNC_DBG_DUMP(prio,
+			"%s[%d]: hash:%d ctx:%llu seqno:%llu f:%llu d:%llu err:%u time:%llu\n",
+			(queue_type == HW_FENCE_TX_QUEUE) ? "tx" : "rx", i, payload->hash,
+			payload->ctxt_id, payload->seqno, payload->flags, payload->client_data,
+			payload->error, timestamp);
+	}
+}
+
+void hw_fence_debug_dump_queues(enum hw_fence_drv_prio prio,
+	struct msm_hw_fence_client *hw_fence_client)
+{
+	if (!hw_fence_client) {
+		HWFNC_ERR("Invalid params client:0x%pK\n", hw_fence_client);
+		return;
+	}
+
+	HWFNC_DBG_DUMP(prio, "Queues for client %d\n", hw_fence_client->client_id);
+	if (hw_fence_client->queues_num == HW_FENCE_CLIENT_QUEUES)
+		_dump_queue(prio, hw_fence_client, HW_FENCE_RX_QUEUE);
+	_dump_queue(prio, hw_fence_client, HW_FENCE_TX_QUEUE);
+}
+
+/**
+ * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues.
+ * @file: file handler.
+ * @user_buf: user buffer content for debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs dumps the hw-fence queues. Takes as input the desired client to dump.
+ * Dumps to debug msgs the contents of the TX and RX queues for that client, if they exist.
+ */
+static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	struct hw_fence_driver_data *drv_data;
+	int client_id;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", file);
+		return -EINVAL;
+	}
+	drv_data = file->private_data;
+
+	client_id = _get_debugfs_input_client(file, user_buf, count, ppos, &drv_data);
+	if (client_id < 0)
+		return -EINVAL;
+
+	if (!drv_data->clients[client_id]) {
+		HWFNC_ERR("client %d not initialized\n", client_id);
+		return -EINVAL;
+	}
+	hw_fence_debug_dump_queues(HW_FENCE_PRINTK, drv_data->clients[client_id]);
+
+	return count;
+}
+
+/**
+ * hw_fence_dbg_dump_table_rd() - debugfs read to dump the hw-fences table.
+ * @file: file handler.
+ * @user_buf: user buffer content for debugfs.
+ * @user_buf_size: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs dumps the hw-fence table. By default debugfs will dump all the valid entries of the
+ * whole table. However, if the user wants to dump only one particular entry, they can provide the
+ * context-id and seqno of the dma-fence of interest by writing to this debugfs node (see
+ * documentation for the write in 'hw_fence_dbg_dump_table_wr').
+ */
+static ssize_t hw_fence_dbg_dump_table_rd(struct file *file, char __user *user_buf,
+	size_t user_buf_size, loff_t *ppos)
+{
+	struct hw_fence_driver_data *drv_data;
+	int entry_size = sizeof(struct msm_hw_fence);
+	char *buf = NULL;
+	int len = 0, max_size = SZ_4K;
+	static u32 index, cnt;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", file);
+		return -EINVAL;
+	}
+	drv_data = file->private_data;
+
+	if (!drv_data->hw_fences_tbl) {
+		HWFNC_ERR("Failed to dump table: Null fence table\n");
+		return -EINVAL;
+	}
+
+	if (index >= drv_data->hw_fences_tbl_cnt) {
+		HWFNC_DBG_H("no more data index:%d cnt:%d\n", index, drv_data->hw_fences_tbl_cnt);
+		index = cnt = 0;
+		return 0;
+	}
+
+	if (user_buf_size < entry_size) {
+		HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size,
+			entry_size);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(max_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len = drv_data->debugfs_data.entry_rd ?
+		dump_single_entry(drv_data, buf, &index, max_size) :
+		dump_full_table(drv_data, buf, &index, &cnt, max_size, entry_size);
+
+	if (len <= 0 || len > user_buf_size) {
+		HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size);
+		len = 0;
+		goto exit;
+	}
+
+	if (copy_to_user(user_buf, buf, len)) {
+		HWFNC_ERR("failed to copy to user!\n");
+		len = -EFAULT;
+		goto exit;
+	}
+	*ppos += len;
+exit:
+	kfree(buf);
+	return len;
+}
+
+/**
+ * hw_fence_dbg_dump_table_wr() - debugfs write to control the dump of the hw-fences table.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @user_buf_size: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs receives as parameters the settings to dump either the whole hw-fences table
+ * or only one element of the table in the next read of the same debugfs node.
+ * If this debugfs receives two input values, it will interpret them as the 'context-id' and the
+ * 'sequence-id' to dump from the hw-fence table in the subsequent reads of the debugfs.
+ * Otherwise, if the debugfs receives only one input value, the next read from the debugfs will
+ * dump the whole hw-fences table.
+ */
+static ssize_t hw_fence_dbg_dump_table_wr(struct file *file,
+		const char __user *user_buf, size_t user_buf_size, loff_t *ppos)
+{
+	struct hw_fence_driver_data *drv_data;
+	u64 param_0, param_1;
+	char buf[24];
+	int num_input_params;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", file);
+		return -EINVAL;
+	}
+	drv_data = file->private_data;
+
+	if (user_buf_size >= sizeof(buf)) {
+		HWFNC_ERR("wrong size:%d size:%d\n", user_buf_size, sizeof(buf));
+		return -EFAULT;
+	}
+
+	if (copy_from_user(buf, user_buf, user_buf_size))
+		return -EFAULT;
+
+	buf[user_buf_size] = 0; /* end of string */
+
+	/* read the input params */
+	num_input_params = sscanf(buf, "%llu %llu", &param_0, &param_1);
+
+	if (num_input_params == 2) { /* if debugfs receives two input params */
+		drv_data->debugfs_data.context_rd = param_0;
+		drv_data->debugfs_data.seqno_rd = param_1;
+		drv_data->debugfs_data.entry_rd = true;
+	} else if (num_input_params == 1) { /* if debugfs receives one param */
+		drv_data->debugfs_data.context_rd = 0;
+		drv_data->debugfs_data.seqno_rd = 0;
+		drv_data->debugfs_data.entry_rd = false;
+	} else {
+		HWFNC_ERR("invalid num params:%d\n", num_input_params);
+		return -EFAULT;
+	}
+
+	return user_buf_size;
+}
+
+/**
+ * hw_fence_dbg_create_join_fence() - debugfs write to simulate the lifecycle of a join hw-fence.
+ * @file: file handler.
+ * @user_buf: user buffer content from debugfs.
+ * @count: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ *
+ * This debugfs will: create, signal, register-for-signal and destroy a join hw-fence.
+ * Note that this simulation relies on the user first registering the clients as debug-clients
+ * through the debugfs 'hw_fence_dbg_register_clients_wr'. If the clients are not previously
+ * registered as debug-clients, this simulation will fail and won't run.
+ */
+static ssize_t hw_fence_dbg_create_join_fence(struct file *file,
+			const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dma_fence_array *fence_array;
+	struct hw_fence_driver_data *drv_data;
+	struct dma_fence *fence_array_fence;
+	struct client_data *client_info_src, *client_info_dst;
+	u64 hw_fence_dbg_seqno = 1;
+	int client_id_src, client_id_dst;
+	struct msm_hw_fence_create_params params;
+	int i, ret = 0;
+	u64 hash;
+	struct msm_hw_fence_client *hw_fence_client;
+	int tx_client, rx_client, signal_id;
+
+	/* creates 3 fences and a parent fence */
+	int num_fences = 3;
+	struct dma_fence **fences = NULL;
+	spinlock_t **fences_lock = NULL;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", file);
+		return -EINVAL;
+	}
+	drv_data = file->private_data;
+	client_id_src = HW_FENCE_CLIENT_ID_CTL0;
+	client_id_dst = HW_FENCE_CLIENT_ID_CTL1;
+	client_info_src = _get_client_node(drv_data, client_id_src);
+	client_info_dst = _get_client_node(drv_data, client_id_dst);
+	if (!client_info_src || IS_ERR_OR_NULL(client_info_src->client_handle) ||
+			!client_info_dst || IS_ERR_OR_NULL(client_info_dst->client_handle)) {
+		HWFNC_ERR("client_src:%d or client:%d is not register as debug client\n",
+			client_id_src, client_id_dst);
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_info_src->client_handle;
+
+	fences_lock = kcalloc(num_fences, sizeof(*fences_lock), GFP_KERNEL);
+	if (!fences_lock)
+		return -ENOMEM;
+
+	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+	if (!fences) {
+		kfree(fences_lock);
+		return -ENOMEM;
+	}
+
+	/* Create the array of dma fences */
+	for (i = 0; i < num_fences; i++) {
+		struct hw_dma_fence *dma_fence;
+
+		fences_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+		if (!fences_lock[i]) {
+			_cleanup_fences(i, fences, fences_lock);
+			return -ENOMEM;
+		}
+
+		dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL);
+		if (!dma_fence) {
+			_cleanup_fences(i, fences, fences_lock);
+			return -ENOMEM;
+		}
+		fences[i] = &dma_fence->base;
+
+		spin_lock_init(fences_lock[i]);
+		dma_fence_init(fences[i], &hw_fence_dbg_ops, fences_lock[i],
+			client_info_src->dma_context, hw_fence_dbg_seqno + i);
+	}
+
+	/* create the fence array from array of dma fences */
+	fence_array = dma_fence_array_create(num_fences, fences,
+				client_info_src->dma_context, hw_fence_dbg_seqno + num_fences, 0);
+	if (!fence_array) {
+		HWFNC_ERR("Error creating fence_array\n");
+		_cleanup_fences(num_fences - 1, fences, fences_lock);
+		return -EINVAL;
+	}
+
+	/* create hw fence and write to tx queue for each dma fence */
+	for (i = 0; i < num_fences; i++) {
+		params.fence = fences[i];
+		params.handle = &hash;
+
+		ret = msm_hw_fence_create(client_info_src->client_handle, &params);
+		if (ret) {
+			HWFNC_ERR("Error creating HW fence\n");
+			count = -EINVAL;
+			goto error;
+		}
+
+		/* Write to Tx queue */
+		hw_fence_update_queue(drv_data, hw_fence_client, client_info_src->dma_context,
+			hw_fence_dbg_seqno + i, hash, 0, 0, 0,
+			HW_FENCE_TX_QUEUE - 1);
+	}
+
+	/* wait on the fence array */
+	fence_array_fence = &fence_array->base;
+	msm_hw_fence_wait_update_v2(client_info_dst->client_handle, &fence_array_fence, NULL, NULL,
+		1, 1);
+
+	signal_id = dbg_out_clients_signal_map_no_dpu[client_id_src].ipc_signal_id;
+	if (signal_id < 0) {
+		count = -EINVAL;
+		goto error;
+	}
+
+	/* write to ipcc to trigger the irq */
+	tx_client = drv_data->ipcc_client_pid;
+	rx_client = drv_data->ipcc_client_vid;
+	hw_fence_ipcc_trigger_signal(drv_data, tx_client, rx_client, signal_id);
+
+	usleep_range(drv_data->debugfs_data.hw_fence_sim_release_delay,
+		(drv_data->debugfs_data.hw_fence_sim_release_delay + 5));
+
+error:
+	/* this frees the memory for the fence-array and each dma-fence */
+	dma_fence_put(&fence_array->base);
+
+	/*
+	 * free the array of pointers; no need to kfree 'fences' itself, since it is released
+	 * by the fence-array release api
+	 */
+	kfree(fences_lock);
+
+	return count;
+}
+
+int process_validation_client_loopback(struct hw_fence_driver_data *drv_data,
+		int client_id)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+
+	if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) {
+		HWFNC_ERR("invalid client_id: %d min: %d max: %d\n", client_id,
+				HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
+		return -EINVAL;
+	}
+
+	mutex_lock(&drv_data->clients_register_lock);
+
+	if (!drv_data->clients[client_id]) {
+		mutex_unlock(&drv_data->clients_register_lock);
+		HWFNC_ERR("Processing workaround for unregistered val client:%d\n", client_id);
+		return -EINVAL;
+	}
+
+	hw_fence_client = drv_data->clients[client_id];
+
+	HWFNC_DBG_IRQ("Processing validation client workaround client_id:%d\n", client_id);
+
+	/* set the atomic flag, to signal the client wait */
+	atomic_set(&hw_fence_client->val_signal, 1);
+
+	/* wake-up waiting client */
+	wake_up_all(&hw_fence_client->wait_queue);
+
+	mutex_unlock(&drv_data->clients_register_lock);
+
+	return 0;
+}
+
+static const struct file_operations hw_fence_reset_client_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_reset_client_wr,
+};
+
+static const struct file_operations hw_fence_register_clients_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_register_clients_wr,
+};
+
+static const struct file_operations hw_fence_tx_and_signal_clients_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_tx_and_signal_clients_wr,
+};
+
+static const struct file_operations hw_fence_create_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_create_wr,
+};
+
+static const struct file_operations hw_fence_dump_table_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_dump_table_wr,
+	.read = hw_fence_dbg_dump_table_rd,
+};
+
+static const struct file_operations hw_fence_dump_queues_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_dump_queues_wr,
+};
+
+static const struct file_operations hw_fence_dump_events_fops = {
+	.open = simple_open,
+	.read = hw_fence_dbg_dump_events_rd,
+};
+
+static const struct file_operations hw_fence_create_join_fence_fops = {
+	.open = simple_open,
+	.write = hw_fence_dbg_create_join_fence,
+};
+
+int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data)
+{
+	struct dentry *debugfs_root;
+
+	debugfs_root = debugfs_create_dir("hw_fence", NULL);
+	if (IS_ERR_OR_NULL(debugfs_root)) {
+		HWFNC_ERR("debugfs_root create_dir fail, error %ld\n",
+			PTR_ERR(debugfs_root));
+		drv_data->debugfs_data.root = NULL;
+		return -EINVAL;
+	}
+
+	mutex_init(&drv_data->debugfs_data.clients_list_lock);
+	INIT_LIST_HEAD(&drv_data->debugfs_data.clients_list);
+	drv_data->debugfs_data.root = debugfs_root;
+	drv_data->debugfs_data.create_hw_fences = true;
+	drv_data->debugfs_data.hw_fence_sim_release_delay = 8333; /* uS */
+
+	debugfs_create_file("ipc_trigger", 0600, debugfs_root, drv_data,
+		&hw_fence_dbg_ipcc_fops);
+	debugfs_create_file("dpu_trigger", 0600, debugfs_root, drv_data,
+		&hw_fence_dbg_ipcc_dpu_fops);
+	debugfs_create_file("hw_fence_reset_client", 0600, debugfs_root, drv_data,
+		&hw_fence_reset_client_fops);
+	debugfs_create_file("hw_fence_register_clients", 0600, debugfs_root, drv_data,
+		&hw_fence_register_clients_fops);
+	debugfs_create_file("hw_fence_tx_and_signal", 0600, debugfs_root, drv_data,
+		&hw_fence_tx_and_signal_clients_fops);
+	debugfs_create_file("hw_fence_create_join_fence", 0600, debugfs_root, drv_data,
+		&hw_fence_create_join_fence_fops);
+	debugfs_create_bool("create_hw_fences", 0600, debugfs_root,
+		&drv_data->debugfs_data.create_hw_fences);
+	debugfs_create_u32("sleep_range_us", 0600, debugfs_root,
+		&drv_data->debugfs_data.hw_fence_sim_release_delay);
+	debugfs_create_file("hw_fence_create", 0600, debugfs_root, drv_data,
+		&hw_fence_create_fops);
+	debugfs_create_u32("hw_fence_debug_level", 0600, debugfs_root, &msm_hw_fence_debug_level);
+	debugfs_create_file("hw_fence_dump_table", 0600, debugfs_root, drv_data,
+		&hw_fence_dump_table_fops);
+	debugfs_create_file("hw_fence_dump_queues", 0600, debugfs_root, drv_data,
+		&hw_fence_dump_queues_fops);
+	debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops);
+	debugfs_create_u64("hw_fence_lock_wake_cnt", 0600, debugfs_root,
+		&drv_data->debugfs_data.lock_wake_cnt);
+	debugfs_create_file("hw_fence_dump_events", 0600, debugfs_root, drv_data,
+		&hw_fence_dump_events_fops);
+
+	return 0;
+}
+
+#else
+int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
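
For reference, a user-space sketch that drives the nodes registered above (editorial, not part of the commit; it assumes debugfs is mounted at /sys/kernel/debug and that client id 2 corresponds to HW_FENCE_CLIENT_ID_CTL0 on this target, per the mapping tables in this series):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t len;
		int fd;

		/* register client 2 as a debug client */
		fd = open("/sys/kernel/debug/hw_fence/hw_fence_register_clients", O_WRONLY);
		if (fd < 0)
			return 1;
		write(fd, "2", 1);
		close(fd);

		/* writing a single value selects a full-table dump on the next read */
		fd = open("/sys/kernel/debug/hw_fence/hw_fence_dump_table", O_WRONLY);
		if (fd >= 0) {
			write(fd, "0", 1);
			close(fd);
		}

		/* stream the table until the driver reports no more entries */
		fd = open("/sys/kernel/debug/hw_fence/hw_fence_dump_table", O_RDONLY);
		if (fd < 0)
			return 1;
		while ((len = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)len, stdout);
		close(fd);
		return 0;
	}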

+ 428 - 0
qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_ipc.c

@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of_platform.h>
+#include "hw_fence_drv_priv.h"
+#include "hw_fence_drv_utils.h"
+#include "hw_fence_drv_ipc.h"
+#include "hw_fence_drv_debug.h"
+
+/*
+ * Max size of base table with ipc mappings, with one mapping per client type with configurable
+ * number of subclients
+ */
+#define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \
+	HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)
+
+/**
+ * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger.
+ * @ipc_client_id_virt: virtual ipc client id for the hw-fence client.
+ * @ipc_client_id_phys: physical ipc client id for the hw-fence client.
+ * @ipc_signal_id: ipc signal id for the hw-fence client.
+ * @update_rxq: bool to indicate if client uses rx-queue.
+ * @send_ipc: bool to indicate if client requires ipc interrupt for signaled fences
+ */
+struct hw_fence_client_ipc_map {
+	int ipc_client_id_virt;
+	int ipc_client_id_phys;
+	int ipc_signal_id;
+	bool update_rxq;
+	bool send_ipc;
+};
+
+/**
+ * struct hw_fence_clients_ipc_map - Table that makes the 'client to signal' mapping, which is
+ *		used by the hw fence driver to trigger an ipc signal when the hw fence is already
+ *		signaled.
+ *		This version is for targets that support dpu client id.
+ *
+ * Note that the index of this struct must match the enum hw_fence_client_id
+ */
+struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = {
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/
+	{HW_FENCE_IPC_CLIENT_ID_GPU_VID,  HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, true},/* ctl1 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, true},/* ctl2 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, true},/* ctl3 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, true},/* ctl4 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, true},/* ctl5 */
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/
+#else
+	{0, 0, 0, false, false}, /* val0 */
+	{0, 0, 0, false, false}, /* val1 */
+	{0, 0, 0, false, false}, /* val2 */
+	{0, 0, 0, false, false}, /* val3 */
+	{0, 0, 0, false, false}, /* val4 */
+	{0, 0, 0, false, false}, /* val5 */
+	{0, 0, 0, false, false}, /* val6 */
+#endif /* CONFIG_DEBUG_FS */
+	{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true}, /* ipe */
+	{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true}, /* vpu */
+};
+
+/**
+ * struct hw_fence_clients_ipc_map_v2 - Table that makes the 'client to signal' mapping, which is
+ *		used by the hw fence driver to trigger an ipc signal when the hw fence is already
+ *		signaled.
+ *		This version is for targets that support dpu client id and IPC v2.
+ *
+ * Note that the index of this struct must match the enum hw_fence_client_id for clients ids less
+ * than HW_FENCE_MAX_STATIC_CLIENTS_INDEX.
+ * For clients with configurable sub-clients, the index of this struct matches
+ * HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC).
+ */
+struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = {
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */
+	{HW_FENCE_IPC_CLIENT_ID_GPU_VID,  HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, true},/* ctl1 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, true},/* ctl2 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, true},/* ctl3 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, true},/* ctl4 */
+	{HW_FENCE_IPC_CLIENT_ID_DPU_VID,  HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, true},/* ctl5 */
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, false},/*val0*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, false},/*val1*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, false},/*val2*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, false},/*val3*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, false},/*val4*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, false},/*val5*/
+	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, false},/*val6*/
+#else
+	{0, 0, 0, false, false}, /* val0 */
+	{0, 0, 0, false, false}, /* val1 */
+	{0, 0, 0, false, false}, /* val2 */
+	{0, 0, 0, false, false}, /* val3 */
+	{0, 0, 0, false, false}, /* val4 */
+	{0, 0, 0, false, false}, /* val5 */
+	{0, 0, 0, false, false}, /* val6 */
+#endif /* CONFIG_DEBUG_FS */
+	{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true}, /* ipe */
+	{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */
+	{HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/
+	{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/
+	{HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/
+	{HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/
+	{HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/
+	{HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/
+	{HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/
+	{HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/
+};
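+
+/*
+ * Editorial example (not part of this commit): for a configurable client type
+ * such as ife0, a target that enables N sub-clients gets N copies of that
+ * type's single row above, placed at consecutive indices in the per-target
+ * table, with ipc_signal_id rewritten to 0..N-1 (see
+ * _hw_fence_ipcc_init_map_with_configurable_clients() below).
+ */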
+
+int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id)
+{
+	if (!drv_data || client_id >= drv_data->clients_num)
+		return -EINVAL;
+
+	return drv_data->ipc_clients_table[client_id].ipc_client_id_virt;
+}
+
+int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id)
+{
+	if (!drv_data || client_id >= drv_data->clients_num)
+		return -EINVAL;
+
+	return drv_data->ipc_clients_table[client_id].ipc_client_id_phys;
+}
+
+int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id)
+{
+	if (!drv_data || client_id >= drv_data->clients_num)
+		return -EINVAL;
+
+	return drv_data->ipc_clients_table[client_id].ipc_signal_id;
+}
+
+bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id)
+{
+	if (!drv_data || client_id >= drv_data->clients_num)
+		return false;
+
+	return drv_data->ipc_clients_table[client_id].update_rxq;
+}
+
+bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id)
+{
+	if (!drv_data || client_id >= drv_data->clients_num)
+		return false;
+
+	return drv_data->ipc_clients_table[client_id].send_ipc;
+}
+
+/**
+ * _get_ipc_phys_client_name() - Returns ipc client name from its physical id, used for debugging.
+ */
+static inline char *_get_ipc_phys_client_name(u32 client_id)
+{
+	switch (client_id) {
+	case HW_FENCE_IPC_CLIENT_ID_APPS_PID:
+		return "APPS_PID";
+	case HW_FENCE_IPC_CLIENT_ID_GPU_PID:
+		return "GPU_PID";
+	case HW_FENCE_IPC_CLIENT_ID_DPU_PID:
+		return "DPU_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IPE_PID:
+		return "IPE_PID";
+	case HW_FENCE_IPC_CLIENT_ID_VPU_PID:
+		return "VPU_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE0_PID:
+		return "IFE0_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE1_PID:
+		return "IFE1_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE2_PID:
+		return "IFE2_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE3_PID:
+		return "IFE3_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE4_PID:
+		return "IFE4_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE5_PID:
+		return "IFE5_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE6_PID:
+		return "IFE6_PID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE7_PID:
+		return "IFE7_PID";
+	}
+
+	return "UNKNOWN_PID";
+}
+
+/**
+ * _get_ipc_virt_client_name() - Returns ipc client name from its virtual id, used for debugging.
+ */
+static inline char *_get_ipc_virt_client_name(u32 client_id)
+{
+	switch (client_id) {
+	case HW_FENCE_IPC_CLIENT_ID_APPS_VID:
+		return "APPS_VID";
+	case HW_FENCE_IPC_CLIENT_ID_GPU_VID:
+		return "GPU_VID";
+	case HW_FENCE_IPC_CLIENT_ID_DPU_VID:
+		return "DPU_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IPE_VID:
+		return "IPE_VID";
+	case HW_FENCE_IPC_CLIENT_ID_VPU_VID:
+		return "VPU_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE0_VID:
+		return "IFE0_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE1_VID:
+		return "IFE1_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE2_VID:
+		return "IFE2_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE3_VID:
+		return "IFE3_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE4_VID:
+		return "IFE4_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE5_VID:
+		return "IFE5_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE6_VID:
+		return "IFE6_VID";
+	case HW_FENCE_IPC_CLIENT_ID_IFE7_VID:
+		return "IFE7_VID";
+	}
+
+	return "UNKNOWN_VID";
+}
+
+void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
+	u32 tx_client_pid, u32 rx_client_vid, u32 signal_id)
+{
+	void __iomem *ptr;
+	u32 val;
+
+	/* Send signal */
+	ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id,
+		tx_client_pid);
+	val = (rx_client_vid << 16) | signal_id;
+
+	HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n",
+		_get_ipc_phys_client_name(tx_client_pid), tx_client_pid,
+		_get_ipc_virt_client_name(rx_client_vid), rx_client_vid,
+		signal_id, val, ptr);
+	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
+	writel_relaxed(val, ptr);
+
+	/* Make sure value is written */
+	wmb();
+}
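+
+/*
+ * Example of the SEND register encoding above (illustrative values only):
+ * signaling the DPU (HW_FENCE_IPC_CLIENT_ID_DPU_VID, i.e. 25) on signal_id 2
+ * would write val = (25 << 16) | 2 = 0x00190002, i.e. the receiving client's
+ * virtual id in bits [31:16] and the signal index within that client in
+ * bits [15:0].
+ */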
+
+static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_driver_data *drv_data,
+	struct hw_fence_client_ipc_map *base_table)
+{
+	int i, j, map_idx;
+	size_t size;
+
+	size = drv_data->clients_num * sizeof(struct hw_fence_client_ipc_map);
+	drv_data->ipc_clients_table = kzalloc(size, GFP_KERNEL);
+
+	if (!drv_data->ipc_clients_table)
+		return -ENOMEM;
+
+	/* copy mappings for static hw fence clients */
+	size = HW_FENCE_MAX_STATIC_CLIENTS_INDEX * sizeof(struct hw_fence_client_ipc_map);
+	memcpy(drv_data->ipc_clients_table, base_table, size);
+
+	/* initialize mappings for ipc clients with configurable number of hw fence clients */
+	map_idx = HW_FENCE_MAX_STATIC_CLIENTS_INDEX;
+	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE; i++) {
+		int client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + i;
+		int clients_num = drv_data->hw_fence_client_types[client_type].clients_num;
+
+		for (j = 0; j < clients_num; j++) {
+			/* this should never happen if drv_data->clients_num is correct */
+			if (map_idx >= drv_data->clients_num) {
+				HWFNC_ERR("%s clients_num:%lu exceeds drv_data->clients_num:%lu\n",
+					drv_data->hw_fence_client_types[client_type].name,
+					clients_num, drv_data->clients_num);
+				return -EINVAL;
+			}
+			drv_data->ipc_clients_table[map_idx] =
+				base_table[HW_FENCE_MAX_STATIC_CLIENTS_INDEX + i];
+			drv_data->ipc_clients_table[map_idx].ipc_signal_id = j;
+			map_idx++;
+		}
+	}
+
+	return 0;
+}
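+
+/*
+ * Illustrative shape of the table built above, assuming a hypothetical DT that
+ * configures two clients of the first configurable type and one of the second
+ * (the counts are examples, not values from this patch):
+ *
+ *   [0 .. HW_FENCE_MAX_STATIC_CLIENTS_INDEX-1]  copied verbatim from base_table
+ *   [N]    base mapping of configurable type 0, ipc_signal_id = 0
+ *   [N+1]  base mapping of configurable type 0, ipc_signal_id = 1
+ *   [N+2]  base mapping of configurable type 1, ipc_signal_id = 0
+ *
+ * i.e. all clients of one configurable type share a single base entry and are
+ * distinguished only by their per-type signal index.
+ */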
+
+/**
+ * _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data,
+ *		according to the ipcc hw revision.
+ * @drv_data: driver data.
+ * @hwrev: ipcc hw revision.
+ */
+static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev)
+{
+	int ret = 0;
+
+	switch (hwrev) {
+	case HW_FENCE_IPCC_HW_REV_170:
+		drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
+		drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
+		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA;
+		drv_data->ipc_clients_table = hw_fence_clients_ipc_map;
+		HWFNC_DBG_INIT("ipcc protocol_id: Kalama\n");
+		break;
+	case HW_FENCE_IPCC_HW_REV_203:
+		drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
+		drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID;
+		drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */
+		ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data,
+			hw_fence_clients_ipc_map_v2);
+		HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n");
+		break;
+	default:
+		return -1;
+	}
+
+	return ret;
+}
+
+int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
+{
+	void __iomem *ptr;
+	u32 val;
+	int ret;
+
+	HWFNC_DBG_H("enable ipc +\n");
+
+	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val);
+	if (ret || !val) {
+		HWFNC_ERR("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val);
+		return -EINVAL;
+	}
+
+	if (_hw_fence_ipcc_hwrev_init(drv_data, val)) {
+		HWFNC_ERR("ipcc protocol id not supported\n");
+		return -EINVAL;
+	}
+
+	/* Enable the configured protocol (e.g. compute l1, protocol_id = 2) */
+	val = 0x00000000;
+	ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id,
+		drv_data->ipcc_client_pid);
+	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
+	writel_relaxed(val, ptr);
+
+	/* Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8) */
+	val = 0x00080000;
+	ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id,
+		drv_data->ipcc_client_pid);
+	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
+	writel_relaxed(val, ptr);
+
+	HWFNC_DBG_H("enable ipc -\n");
+
+	return 0;
+}
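+
+/*
+ * Note on the RECV_SIGNAL_ENABLE value above: 0x00080000 appears to follow the
+ * same (client_id << 16) | signal_id layout used by the signal trigger,
+ * decoding to sender client 0x8 (APPS) and signal 0, which matches the
+ * APPS(NS)-to-APPS(NS) pairing described in the comment.
+ */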
+
+int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data)
+{
+	struct hw_fence_client_ipc_map *hw_fence_client;
+	bool protocol_enabled = false;
+	void __iomem *ptr;
+	u32 val;
+	int i;
+
+	HWFNC_DBG_H("enable dpu ipc +\n");
+
+	if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table) {
+		HWFNC_ERR("invalid drv data\n");
+		return -1;
+	}
+
+	HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem);
+
+	HWFNC_DBG_H("Initialize dpu signals\n");
+	/* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */
+	for (i = 0; i < drv_data->clients_num; i++) {
+		hw_fence_client = &drv_data->ipc_clients_table[i];
+
+		/* skip any client that is not a dpu client */
+		if (hw_fence_client->ipc_client_id_virt != HW_FENCE_IPC_CLIENT_ID_DPU_VID)
+			continue;
+
+		if (!protocol_enabled) {
+			/*
+			 * First DPU client will enable the protocol for dpu, e.g. compute l1
+			 * (protocol_id = 2) or fencing protocol, depending on the target, for the
+			 * dpu client (vid = 25, pid = 9).
+			 * Sets bit(1) to clear when RECV_ID is read
+			 */
+			val = 0x00000001;
+			ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem,
+				drv_data->protocol_id, hw_fence_client->ipc_client_id_phys);
+			HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
+			writel_relaxed(val, ptr);
+
+			protocol_enabled = true;
+		}
+
+		/* Enable signals for dpu client */
+		HWFNC_DBG_H("dpu client:%d vid:%d pid:%d signal:%d\n", i,
+			hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys,
+			hw_fence_client->ipc_signal_id);
+
+		/* Enable input apps-signal for dpu */
+		val = (HW_FENCE_IPC_CLIENT_ID_APPS_VID << 16) |
+				(hw_fence_client->ipc_signal_id & 0xFFFF);
+		ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem,
+			drv_data->protocol_id, hw_fence_client->ipc_client_id_phys);
+		HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
+		writel_relaxed(val, ptr);
+	}
+
+	HWFNC_DBG_H("enable dpu ipc -\n");
+
+	return 0;
+}

+ 1766 - 0
qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_priv.c

@@ -0,0 +1,1766 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include "hw_fence_drv_priv.h"
+#include "hw_fence_drv_utils.h"
+#include "hw_fence_drv_ipc.h"
+#include "hw_fence_drv_debug.h"
+
+/* Global atomic lock */
+#define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val)
+
+#define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1)
+
+#define REQUIRES_IDX_TRANSLATION(queue) \
+	((queue)->rd_wr_idx_factor && ((queue)->rd_wr_idx_start || (queue)->rd_wr_idx_factor > 1))
+
+#define IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, idx) \
+	(((idx) - (queue)->rd_wr_idx_start) * (queue)->rd_wr_idx_factor)
+
+#define IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, idx) \
+	(((idx) / (queue)->rd_wr_idx_factor) + (queue)->rd_wr_idx_start)
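+
+/*
+ * Worked example of the translation macros, assuming a hypothetical tx queue
+ * with rd_wr_idx_start = 128 and rd_wr_idx_factor = 4 (i.e. the remote side
+ * counts entries from 128 while the driver indexes dwords from 0):
+ *
+ *   IDX_TRANSLATE_CUSTOM_TO_DEFAULT(q, 130) = (130 - 128) * 4 = 8
+ *   IDX_TRANSLATE_DEFAULT_TO_CUSTOM(q, 8)   = (8 / 4) + 128   = 130
+ *
+ * REQUIRES_IDX_TRANSLATION() is false for the default start = 0, factor = 1
+ * configuration, so such queues skip both conversions.
+ */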
+
+inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
+{
+#ifdef HWFENCE_USE_SLEEP_TIMER
+	return readl_relaxed(drv_data->qtime_io_mem);
+#else /* USE QTIMER */
+	return arch_timer_read_counter();
+#endif /* HWFENCE_USE_SLEEP_TIMER */
+}
+
+static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
+	enum hw_fence_mem_reserve mem_reserve_id,
+	struct msm_hw_fence_mem_addr *mem_descriptor,
+	struct msm_hw_fence_queue *queues, int queues_num,
+	int client_id)
+{
+	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
+	struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
+	struct hw_fence_client_type_desc *desc;
+	void *ptr, *qptr;
+	phys_addr_t phys, qphys;
+	u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1;
+	int headers_size, queue_size, payload_size;
+	int start_padding = 0, end_padding = 0;
+	int i, ret = 0;
+	bool skip_txq_wr_idx = false;
+
+	HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
+	switch (mem_reserve_id) {
+	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
+		headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE;
+		queue_size = drv_data->hw_fence_ctrl_queue_size;
+		payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
+		break;
+	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
+		if (client_id >= drv_data->clients_num ||
+				!drv_data->hw_fence_client_queue_size[client_id].type) {
+			HWFNC_ERR("Invalid client_id:%d for clients_num:%lu\n", client_id,
+				drv_data->clients_num);
+			return -EINVAL;
+		}
+
+		desc = drv_data->hw_fence_client_queue_size[client_id].type;
+		start_padding = desc->start_padding;
+		end_padding = desc->end_padding;
+		headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) + start_padding +
+			end_padding;
+		queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
+		payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
+		txq_idx_start = desc->txq_idx_start;
+		txq_idx_factor = desc->txq_idx_factor ? desc->txq_idx_factor : 1;
+		skip_txq_wr_idx = desc->skip_txq_wr_idx;
+		break;
+	default:
+		HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
+		return -EINVAL;
+	}
+
+	/* Reserve Virtual and Physical memory for HFI headers */
+	ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id);
+	if (ret) {
+		HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id);
+		return -ENOMEM;
+	}
+	HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size);
+
+	/* Populate Memory descriptor with address */
+	mem_descriptor->virtual_addr = ptr;
+	mem_descriptor->device_addr = phys;
+	mem_descriptor->size = size; /* bytes */
+	mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */
+
+	HWFNC_DBG_INIT("Initialize headers: headers_size:%d start_padding:%d end_padding:%d\n",
+		headers_size, start_padding, end_padding);
+	/* Initialize headers info within hfi memory */
+	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
+	hfi_table_header->version = 0;
+	hfi_table_header->size = size; /* bytes */
+	/* Offset, from the Base Address, where the first queue header starts */
+	hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE + start_padding;
+	hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE;
+	hfi_table_header->num_q = queues_num; /* number of queues */
+	hfi_table_header->num_active_q = queues_num;
+
+	/* Initialize Queues Info within HFI memory */
+
+	/*
+	 * Calculate the offset where the hfi queue header starts, which is at
+	 * the end of the hfi table header
+	 */
+	HWFNC_DBG_INIT("Initialize queues\n");
+	hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
+					   ((char *)ptr + hfi_table_header->qhdr0_offset);
+	for (i = 0; i < queues_num; i++) {
+		HWFNC_DBG_INIT("init queue[%d]\n", i);
+
+		/* Calculate the offset where the Queue starts */
+		start_queue_offset = headers_size + (i * queue_size); /* Bytes */
+		qphys = phys + start_queue_offset; /* start of the PA for the queue elems */
+		qptr = (char *)ptr + start_queue_offset; /* start of the va for queue elems */
+
+		/* Set the physical start address in the HFI queue header */
+		hfi_queue_header->start_addr = qphys;
+
+		/* Set the queue type (i.e. RX or TX queue) */
+		hfi_queue_header->type = IS_HW_FENCE_TX_QUEUE(i) ? HW_FENCE_TX_QUEUE :
+			HW_FENCE_RX_QUEUE;
+
+		/* Set the size of this header */
+		hfi_queue_header->queue_size = queue_size;
+
+		/* Set the payload size */
+		hfi_queue_header->pkt_size = payload_size;
+
+		/* Set write index for clients' tx queues that index from nonzero value */
+		if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !hfi_queue_header->write_index) {
+			if (skip_txq_wr_idx)
+				hfi_queue_header->tx_wm = txq_idx_start;
+			hfi_queue_header->read_index = txq_idx_start;
+			hfi_queue_header->write_index = txq_idx_start;
+			HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%lu\n", client_id,
+				skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx",
+				txq_idx_start);
+		}
+
+		/* Update memory for hfi_queue_header */
+		wmb();
+
+		/* Store Memory info in the Client data */
+		queues[i].va_queue = qptr;
+		queues[i].pa_queue = qphys;
+		queues[i].va_header = hfi_queue_header;
+		queues[i].q_size_bytes = queue_size;
+		HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%x hd:0x%pK sz:%u pkt:%d\n",
+			hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE",
+			client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header,
+			queues[i].q_size_bytes, payload_size);
+
+		/* Store additional tx queue rd_wr_idx properties */
+		if (IS_HW_FENCE_TX_QUEUE(i)) {
+			queues[i].rd_wr_idx_start = txq_idx_start;
+			queues[i].rd_wr_idx_factor = txq_idx_factor;
+			queues[i].skip_wr_idx = skip_txq_wr_idx;
+		} else {
+			queues[i].rd_wr_idx_factor = 1;
+		}
+		HWFNC_DBG_INIT("rd_wr_idx_start:%lu rd_wr_idx_factor:%lu skip_wr_idx:%s\n",
+			queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor,
+			queues[i].skip_wr_idx ? "true" : "false");
+
+		/* Next header */
+		hfi_queue_header++;
+	}
+
+	return ret;
+}
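+
+/*
+ * Sketch of the resulting memory layout for one client allocation, for
+ * queues_num = 2 (tx + rx); offsets follow the computations above:
+ *
+ *   base ---------------------------- mem_descriptor->virtual_addr / phys
+ *          hfi queue table header     (qhdr0_offset points past it)
+ *          start_padding (optional)
+ *          hfi queue header[0] (tx)
+ *          hfi queue header[1] (rx)
+ *          end_padding (optional)
+ *   headers_size -------------------- first queue starts here
+ *          tx payload ring            queue_size bytes
+ *   headers_size + queue_size -------
+ *          rx payload ring            queue_size bytes
+ */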
+
+static inline bool _lock_client_queue(int queue_type)
+{
+	/* Only lock Rx Queue */
+	return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? true : false;
+}
+
+char *_get_queue_type(int queue_type)
+{
+	return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ";
+}
+
+static void _translate_queue_indexes_custom_to_default(struct msm_hw_fence_queue *queue,
+	u32 *read_idx, u32 *write_idx)
+{
+	if (REQUIRES_IDX_TRANSLATION(queue)) {
+		*read_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *read_idx);
+		*write_idx = IDX_TRANSLATE_CUSTOM_TO_DEFAULT(queue, *write_idx);
+		HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n",
+			*read_idx, *write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
+	}
+}
+
+int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
+		 struct msm_hw_fence_queue_payload *payload, int queue_type)
+{
+	struct msm_hw_fence_queue *queue;
+
+	if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) {
+		HWFNC_ERR("Invalid queue type:%s hw_fence_client:0x%pK payload:0x%pK\n", queue_type,
+			hw_fence_client, payload);
+		return -EINVAL;
+	}
+
+	queue = &hw_fence_client->queues[queue_type];
+	HWFNC_DBG_Q("read client:%lu queue:0x%pK\n", hw_fence_client->client_id, queue);
+
+	return hw_fence_read_queue_helper(queue, payload);
+}
+
+int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue,
+		 struct msm_hw_fence_queue_payload *payload)
+{
+	struct msm_hw_fence_hfi_queue_header *hfi_header;
+	u32 read_idx, write_idx, to_read_idx;
+	u32 *read_ptr;
+	u32 payload_size_u32, q_size_u32;
+	struct msm_hw_fence_queue_payload *read_ptr_payload;
+
+	hfi_header = queue->va_header;
+
+	q_size_u32 = (queue->q_size_bytes / sizeof(u32));
+	payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32));
+	HWFNC_DBG_Q("sizeof payload:%d\n", sizeof(struct msm_hw_fence_queue_payload));
+
+	if (!hfi_header || !payload) {
+		HWFNC_ERR("Invalid queue\n");
+		return -EINVAL;
+	}
+
+	/* Make sure data is ready before read */
+	mb();
+
+	/* Get read and write index */
+	read_idx = readl_relaxed(&hfi_header->read_index);
+	write_idx = readl_relaxed(&hfi_header->write_index);
+
+	/* translate read and write indexes from custom indexing to dwords with no offset */
+	_translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx);
+
+	HWFNC_DBG_Q("read rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n",
+		&hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue);
+
+	if (read_idx == write_idx) {
+		HWFNC_DBG_Q("Nothing to read!\n");
+		return -EINVAL;
+	}
+
+	/* Move the pointer where we need to read and cast it */
+	read_ptr = ((u32 *)queue->va_queue + read_idx);
+	read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr;
+	HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%pK read_ptr_payload:0x%pK\n", read_ptr,
+		queue->va_queue, queue->pa_queue, read_ptr_payload);
+
+	/* Calculate the index after the read */
+	to_read_idx = read_idx + payload_size_u32;
+
+	/*
+	 * wrap-around case, here we are reading the last element of the queue, therefore set
+	 * to_read_idx, which is the index after the read, to the beginning of the
+	 * queue
+	 */
+	if (to_read_idx >= q_size_u32)
+		to_read_idx = 0;
+
+	/* translate to_read_idx to custom indexing with offset */
+	if (REQUIRES_IDX_TRANSLATION(queue)) {
+		to_read_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_read_idx);
+		HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n",
+			to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
+	}
+
+	/* Read the Client Queue */
+	*payload = *read_ptr_payload;
+
+	/* update the read index */
+	writel_relaxed(to_read_idx, &hfi_header->read_index);
+
+	/* update memory for the index */
+	wmb();
+
+	/* Return one if queue still has contents after read */
+	return to_read_idx == write_idx ? 0 : 1;
+}
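+
+/*
+ * Example of the wrap-around case above, assuming q_size_u32 = 32 and
+ * payload_size_u32 = 8: a read at read_idx = 24 consumes dwords 24..31 and
+ * computes to_read_idx = 32, which is >= q_size_u32 and is therefore folded
+ * back to 0 before being translated (if needed) to the queue's custom
+ * indexing and published in read_index.
+ */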
+
+static int _get_update_queue_params(struct msm_hw_fence_queue *queue,
+	struct msm_hw_fence_hfi_queue_header **hfi_header, u32 *q_size_u32, u32 *payload_size,
+	u32 *payload_size_u32, u32 **wr_ptr)
+{
+	if (!queue) {
+		HWFNC_ERR("invalid queue\n");
+		return -EINVAL;
+	}
+
+	*hfi_header = queue->va_header;
+	if (!*hfi_header) {
+		HWFNC_ERR("Invalid queue hfi_header\n");
+		return -EINVAL;
+	}
+
+	*q_size_u32 = (queue->q_size_bytes / sizeof(u32));
+	*payload_size = sizeof(struct msm_hw_fence_queue_payload);
+	*payload_size_u32 = (*payload_size / sizeof(u32));
+
+	/* if skipping update wr_index, then use hfi_header->tx_wm instead */
+	if (queue->skip_wr_idx)
+		*wr_ptr = &((*hfi_header)->tx_wm);
+	else
+		*wr_ptr = &((*hfi_header)->write_index);
+
+	return 0;
+}
+
+/*
+ * This function writes to the queue of the client. The 'queue_type' determines
+ * if this function is writing to the rx or tx queue
+ */
+int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
+	u64 flags, u64 client_data, u32 error, int queue_type)
+{
+	struct msm_hw_fence_hfi_queue_header *hfi_header;
+	struct msm_hw_fence_queue *queue;
+	u32 read_idx;
+	u32 write_idx;
+	u32 to_write_idx;
+	u32 q_size_u32;
+	u32 q_free_u32;
+	u32 *q_payload_write_ptr;
+	u32 payload_size, payload_size_u32;
+	struct msm_hw_fence_queue_payload *write_ptr_payload;
+	bool lock_client = false;
+	u32 lock_idx;
+	u64 timestamp;
+	u32 *wr_ptr;
+	int ret = 0;
+
+	if (queue_type >= hw_fence_client->queues_num) {
+		HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%lu\n", queue_type,
+			hw_fence_client->client_id, hw_fence_client->queues_num);
+		return -EINVAL;
+	}
+
+	queue = &hw_fence_client->queues[queue_type];
+	if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size,
+			&payload_size_u32, &wr_ptr)) {
+		HWFNC_ERR("Invalid client:%d q_type:%d queue\n", hw_fence_client->client_id,
+			queue_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * We need to lock the client if there is an Rx Queue update, since that
+	 * is the only time when the HW Fence driver can have a race condition updating
+	 * the Rx Queue, which also could be getting updated by the Fence CTL
+	 */
+	lock_client = _lock_client_queue(queue_type);
+	if (lock_client) {
+		lock_idx = hw_fence_client->client_id - 1;
+
+		if (lock_idx >= drv_data->client_lock_tbl_cnt) {
+			HWFNC_ERR("lock for client id:%d exceed max:%d\n",
+				hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
+			return -EINVAL;
+		}
+		HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);
+
+		/* lock the client rx queue to update */
+		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */
+	}
+
+	/* Make sure data is ready before read */
+	mb();
+
+	/* Get read and write index */
+	read_idx = readl_relaxed(&hfi_header->read_index);
+	write_idx = readl_relaxed(wr_ptr);
+
+	HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n",
+		hw_fence_client->client_id, &hfi_header->read_index, wr_ptr,
+		read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false");
+
+	/* translate read and write indexes from custom indexing to dwords with no offset */
+	_translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx);
+
+	/* Check queue to make sure message will fit */
+	q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
+		(read_idx - write_idx);
+	if (q_free_u32 <= payload_size_u32) {
+		HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32);
+		ret = -EINVAL;
+		goto exit;
+	}
+	HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32);
+
+	/* Move the pointer where we need to write and cast it */
+	q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx);
+	write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr;
+	HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%pK write_ptr_payload:0x%pK\n",
+		q_payload_write_ptr, queue->va_queue, queue->pa_queue, write_ptr_payload);
+
+	/* calculate the index after the write */
+	to_write_idx = write_idx + payload_size_u32;
+
+	HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx,
+		payload_size_u32);
+	HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n",
+		hw_fence_client->client_id, _get_queue_type(queue_type),
+		hash, ctxt_id, seqno, flags, error);
+
+	/*
+	 * wrap-around case, here we are writing to the last element of the queue, therefore
+	 * set to_write_idx, which is the index after the write, to the beginning of the
+	 * queue
+	 */
+	if (to_write_idx >= q_size_u32)
+		to_write_idx = 0;
+
+	/* translate to_write_idx to custom indexing with offset */
+	if (REQUIRES_IDX_TRANSLATION(queue)) {
+		to_write_idx = IDX_TRANSLATE_DEFAULT_TO_CUSTOM(queue, to_write_idx);
+		HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n",
+			to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
+	}
+
+	/* Update Client Queue */
+	writeq_relaxed(payload_size, &write_ptr_payload->size);
+	writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type);
+	writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version);
+	writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id);
+	writeq_relaxed(seqno, &write_ptr_payload->seqno);
+	writeq_relaxed(hash, &write_ptr_payload->hash);
+	writeq_relaxed(flags, &write_ptr_payload->flags);
+	writeq_relaxed(client_data, &write_ptr_payload->client_data);
+	writel_relaxed(error, &write_ptr_payload->error);
+	timestamp = hw_fence_get_qtime(drv_data);
+	writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo);
+	writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi);
+
+	/* update memory for the message */
+	wmb();
+
+	/* update the write index */
+	writel_relaxed(to_write_idx, wr_ptr);
+
+	/* update memory for the index */
+	wmb();
+
+exit:
+	if (lock_client)
+		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */
+
+	return ret;
+}
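+
+/*
+ * Example of the free-space check in hw_fence_update_queue, assuming
+ * q_size_u32 = 32 and payload_size_u32 = 8:
+ *
+ *   read_idx = 0,  write_idx = 24  ->  q_free_u32 = 32 - 24 = 8,  rejected
+ *   read_idx = 16, write_idx = 8   ->  q_free_u32 = 16 - 8  = 8,  rejected
+ *   read_idx = 16, write_idx = 0   ->  q_free_u32 = 16 - 0  = 16, accepted
+ *
+ * Rejecting q_free_u32 <= payload_size_u32 (rather than <) keeps one payload
+ * slot permanently empty, so read_idx == write_idx always means "empty" and
+ * never "full".
+ */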
+
+int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error)
+{
+	u32 q_size_u32, payload_size, payload_size_u32, read_idx, write_idx, second_idx, *wr_ptr;
+	struct msm_hw_fence_queue_payload tmp, *first_payload, *second_payload;
+	struct msm_hw_fence_hfi_queue_header *hfi_header;
+	struct msm_hw_fence_queue *queue;
+	int ret = 0;
+
+	queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1];
+	if (_get_update_queue_params(queue, &hfi_header, &q_size_u32, &payload_size,
+			&payload_size_u32, &wr_ptr)) {
+		HWFNC_ERR("Invalid client:%d tx queue\n", hw_fence_client->client_id);
+		return -EINVAL;
+	}
+
+	/* Make sure data is ready before read */
+	mb();
+
+	/* Get read and write index */
+	read_idx = hfi_header->read_index;
+	write_idx = *wr_ptr;
+
+	/* translate read and write indexes from custom indexing to dwords with no offset */
+	_translate_queue_indexes_custom_to_default(queue, &read_idx, &write_idx);
+
+	if (read_idx == write_idx) {
+		HWFNC_DBG_Q("Empty queue, no entry matches with hash:%llu\n", hash);
+		return -EINVAL;
+	}
+
+	first_payload = (struct msm_hw_fence_queue_payload *)((u32 *)queue->va_queue + read_idx);
+	HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n",
+		hw_fence_client->client_id, queue->va_queue, queue->pa_queue, read_idx,
+		first_payload);
+
+	if (first_payload->hash == hash) {
+		/* Swap not needed, update first payload in client queue with fence error */
+		first_payload->error = error;
+	} else {
+		/* Check whether second entry matches hash */
+		second_idx = read_idx + payload_size_u32;
+
+		/* wrap-around case */
+		if (second_idx >= q_size_u32)
+			second_idx = 0;
+
+		if (second_idx == write_idx) {
+			HWFNC_ERR("Failed to find matching entry with hash:%llu\n", hash);
+			return -EINVAL;
+		}
+
+		second_payload = (struct msm_hw_fence_queue_payload *)
+			((u32 *)queue->va_queue + second_idx);
+		HWFNC_DBG_Q("client:%d txq: va=0x%pK pa=0x%pK idx:%d ptr_payload:0x%pK\n",
+			hw_fence_client->client_id, queue->va_queue, queue->pa_queue, second_idx,
+			second_payload);
+
+		if (second_payload->hash != hash) {
+			HWFNC_ERR("hash:%llu not found in first two queue payloads:%u, %u\n", hash,
+				read_idx, second_idx);
+			return -EINVAL;
+		}
+
+		/* swap first and second payload, updating error field in new first payload */
+		tmp = *first_payload;
+		*first_payload = *second_payload;
+		first_payload->error = error;
+		*second_payload = tmp;
+
+		HWFNC_DBG_L("client_id:%d txq move from idx:%u to idx:%u hash:%llu c:%llu s:%llu\n",
+			hw_fence_client->client_id, read_idx, second_idx, hash, tmp.ctxt_id,
+			tmp.seqno);
+	}
+
+	/* update memory for the messages */
+	wmb();
+
+	HWFNC_DBG_L("client_id:%d update tx queue index:%u hash:%llu error:%u\n",
+		hw_fence_client->client_id, read_idx, hash, error);
+
+	return ret;
+}
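+
+/*
+ * Design note: only the first two unread entries are searched above, which is
+ * consistent with the single swap performed; if the matching payload could sit
+ * deeper in the ring, promoting it to the head would require rotating several
+ * entries instead of swapping two.
+ */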
+
+static int init_global_locks(struct hw_fence_driver_data *drv_data)
+{
+	struct msm_hw_fence_mem_addr *mem_descriptor;
+	phys_addr_t phys;
+	void *ptr;
+	u32 size;
+	int ret;
+
+	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr,
+		&size, 0);
+	if (ret) {
+		HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret);
+		return -ENOMEM;
+	}
+	HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size);
+
+	/* Populate Memory descriptor with address */
+	mem_descriptor = &drv_data->clients_locks_mem_desc;
+	mem_descriptor->virtual_addr = ptr;
+	mem_descriptor->device_addr = phys;
+	mem_descriptor->size = size;
+	mem_descriptor->mem_data = NULL; /* not storing special info for now */
+
+	/* Initialize internal pointers for managing the tables */
+	drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr;
+	drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64);
+
+	return 0;
+}
+
+static int init_hw_fences_table(struct hw_fence_driver_data *drv_data)
+{
+	struct msm_hw_fence_mem_addr *mem_descriptor;
+	phys_addr_t phys;
+	void *ptr;
+	u32 size;
+	int ret;
+
+	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr,
+		&size, 0);
+	if (ret) {
+		HWFNC_ERR("Failed to reserve table mem %d\n", ret);
+		return -ENOMEM;
+	}
+	HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size);
+
+	/* Populate Memory descriptor with address */
+	mem_descriptor = &drv_data->hw_fences_mem_desc;
+	mem_descriptor->virtual_addr = ptr;
+	mem_descriptor->device_addr = phys;
+	mem_descriptor->size = size;
+	mem_descriptor->mem_data = NULL; /* not storing special info for now */
+
+	/* Initialize internal pointers for managing the tables */
+	drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr;
+	drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size /
+		sizeof(struct msm_hw_fence);
+
+	HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl,
+		drv_data->hw_fences_tbl_cnt);
+
+	return 0;
+}
+
+static int init_hw_fences_events(struct hw_fence_driver_data *drv_data)
+{
+	phys_addr_t phys;
+	void *ptr;
+	u32 size;
+	int ret;
+
+	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_EVENTS_BUFF, &phys, &ptr,
+		&size, 0);
+	if (ret) {
+		HWFNC_DBG_INFO("Failed to reserve events buffer %d\n", ret);
+		return -ENOMEM;
+	}
+	drv_data->events = (struct msm_hw_fence_event *)ptr;
+	drv_data->total_events = size / sizeof(struct msm_hw_fence_event);
+	HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%u total_size:%u\n", drv_data->events,
+		drv_data->total_events, sizeof(struct msm_hw_fence_event), size);
+
+	return 0;
+}
+
+static int init_ctrl_queue(struct hw_fence_driver_data *drv_data)
+{
+	struct msm_hw_fence_mem_addr *mem_descriptor;
+	int ret;
+
+	mem_descriptor = &drv_data->ctrl_queue_mem_desc;
+
+	/* Init ctrl queue */
+	ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
+		mem_descriptor, drv_data->ctrl_queues,
+		HW_FENCE_CTRL_QUEUES, 0);
+	if (ret)
+		HWFNC_ERR("Failure to init ctrl queue\n");
+
+	return ret;
+}
+
+int hw_fence_init(struct hw_fence_driver_data *drv_data)
+{
+	int ret;
+	__le32 *mem;
+
+	ret = hw_fence_utils_parse_dt_props(drv_data);
+	if (ret) {
+		HWFNC_ERR("failed to set dt properties\n");
+		goto exit;
+	}
+
+	/* Allocate hw fence driver mem pool and share it with HYP */
+	ret = hw_fence_utils_alloc_mem(drv_data);
+	if (ret) {
+		HWFNC_ERR("failed to alloc base memory\n");
+		goto exit;
+	}
+
+	/* Initialize ctrl queue */
+	ret = init_ctrl_queue(drv_data);
+	if (ret)
+		goto exit;
+
+	ret = init_global_locks(drv_data);
+	if (ret)
+		goto exit;
+	HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl,
+		drv_data->client_lock_tbl_cnt);
+
+	/* Initialize hw fences table */
+	ret = init_hw_fences_table(drv_data);
+	if (ret)
+		goto exit;
+
+	/* Initialize event log */
+	ret = init_hw_fences_events(drv_data);
+	if (ret)
+		HWFNC_DBG_INFO("Unable to init events\n");
+
+	/* Map ipcc registers */
+	ret = hw_fence_utils_map_ipcc(drv_data);
+	if (ret) {
+		HWFNC_ERR("ipcc regs mapping failed\n");
+		goto exit;
+	}
+
+	/* Map time register */
+	ret = hw_fence_utils_map_qtime(drv_data);
+	if (ret) {
+		HWFNC_ERR("qtime reg mapping failed\n");
+		goto exit;
+	}
+
+	/* Init debugfs */
+	ret = hw_fence_debug_debugfs_register(drv_data);
+	if (ret) {
+		HWFNC_ERR("debugfs init failed\n");
+		goto exit;
+	}
+
+	/* Init vIRQ from VM */
+	ret = hw_fence_utils_init_virq(drv_data);
+	if (ret) {
+		HWFNC_ERR("failed to init virq\n");
+		goto exit;
+	}
+
+	mem = drv_data->io_mem_base;
+	HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem);
+
+	HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n",
+		drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt);
+
+exit:
+	return ret;
+}
+
+int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	struct msm_hw_fence_mem_addr *mem_descriptor)
+{
+	int ret;
+
+	if (!drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type) {
+		HWFNC_ERR("invalid client_id:%d not reserved client queue; check dt props\n",
+			hw_fence_client->client_id);
+		return -EINVAL;
+	}
+
+	/* Init client queues */
+	ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
+		&hw_fence_client->mem_descriptor, hw_fence_client->queues,
+		drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type->queues_num,
+		hw_fence_client->client_id);
+	if (ret) {
+		HWFNC_ERR("Failure to init the queue for client:%d\n",
+			hw_fence_client->client_id);
+		goto exit;
+	}
+
+	/* Init client memory descriptor */
+	memcpy(mem_descriptor, &hw_fence_client->mem_descriptor,
+		sizeof(struct msm_hw_fence_mem_addr));
+
+exit:
+	return ret;
+}
+
+int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client)
+{
+	int ret = 0;
+
+	/*
+	 * Initialize IPCC Signals for this client
+	 *
+	 * NOTE: For each client HW core, the client driver might perform its own
+	 * initialization (in case any hw sequence must be enforced); however, if
+	 * that is not the case, any per-client ipcc init needed to enable
+	 * signaling can go here.
+	 */
+	switch ((int)hw_fence_client->client_id_ext) {
+	case HW_FENCE_CLIENT_ID_CTX0:
+		/* nothing to initialize for gpu client */
+		break;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	case HW_FENCE_CLIENT_ID_VAL0:
+	case HW_FENCE_CLIENT_ID_VAL1:
+	case HW_FENCE_CLIENT_ID_VAL2:
+	case HW_FENCE_CLIENT_ID_VAL3:
+	case HW_FENCE_CLIENT_ID_VAL4:
+	case HW_FENCE_CLIENT_ID_VAL5:
+	case HW_FENCE_CLIENT_ID_VAL6:
+		/* nothing to initialize for validation clients */
+		break;
+#endif /* CONFIG_DEBUG_FS */
+	case HW_FENCE_CLIENT_ID_CTL0:
+	case HW_FENCE_CLIENT_ID_CTL1:
+	case HW_FENCE_CLIENT_ID_CTL2:
+	case HW_FENCE_CLIENT_ID_CTL3:
+	case HW_FENCE_CLIENT_ID_CTL4:
+	case HW_FENCE_CLIENT_ID_CTL5:
+		/* initialize ipcc signals for dpu clients */
+		HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n",
+			hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized);
+		if (!drv_data->ipcc_dpu_initialized) {
+			drv_data->ipcc_dpu_initialized = true;
+
+			/* Init dpu client ipcc signal */
+			hw_fence_ipcc_enable_dpu_signaling(drv_data);
+		}
+		break;
+	case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE +
+			MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
+		/* nothing to initialize for IPE client */
+		break;
+	case HW_FENCE_CLIENT_ID_VPU ... HW_FENCE_CLIENT_ID_VPU +
+			MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
+		/* nothing to initialize for VPU client */
+		break;
+	case HW_FENCE_CLIENT_ID_IFE0 ... HW_FENCE_CLIENT_ID_IFE7 +
+			MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
+		/* nothing to initialize for IFE clients */
+		break;
+	default:
+		HWFNC_ERR("Unexpected client_id_ext:%d\n", hw_fence_client->client_id_ext);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client)
+{
+
+	/*
+	 * Initialize Fence Controller resources for this Client,
+	 *  here we need to use the CTRL queue to communicate to the Fence
+	 *  Controller the shared memory for the Rx/Tx queue for this client
+	 *  as well as any information that Fence Controller might need to
+	 *  know for this client.
+	 *
+	 * NOTE: For now, we are doing a static allocation of the
+	 *  client's queues, so currently we don't need any notification
+	 *  to the Fence CTL here through the CTRL queue.
+	 *  Later on we might need it, once the PVM to SVM (and vice versa)
+	 *  communication for initialization is supported.
+	 */
+
+	return 0;
+}
+
+void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client)
+{
+	/*
+	 * Deallocate any resource allocated for this client.
+	 *  If fence controller was notified about existence of this client,
+	 *  we will need to notify fence controller that this client is gone
+	 *
+	 * NOTE: Since we currently use 'fixed' memory for the clients queues,
+	 *  we don't need any notification to the Fence Controller yet;
+	 *  however, if the memory allocation moves from 'fixed' to dynamic
+	 *  allocation, then we will need to notify FenceCTL here about the
+	 *  client that is going away.
+	 */
+	mutex_lock(&drv_data->clients_register_lock);
+	drv_data->clients[hw_fence_client->client_id] = NULL;
+	mutex_unlock(&drv_data->clients_register_lock);
+
+	/* Deallocate client's object */
+	HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id);
+	kfree(hw_fence_client);
+}
+
+static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno,
+	u64 step, u64 *hash)
+{
+	u64 m_size = table_total_entries;
+	int val = 0;
+
+	if (step == 0) {
+		u64 a_multiplier = HW_FENCE_HASH_A_MULT;
+		u64 c_multiplier = HW_FENCE_HASH_C_MULT;
+		u64 b_multiplier = context + (context - 1); /* odd multiplier */
+
+		/*
+		 * if m is a power of 2, we could optimize with a right shift;
+		 * for now we don't, to avoid assuming a power of two
+		 */
+		*hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size;
+	} else {
+		if (step >= m_size) {
+			/*
+			 * If we already traversed the whole table, return failure, since this
+			 * means there are no available spots; the table is either full or full
+			 * enough that we couldn't find an available spot after traversing it.
+			 * Ideally the table shouldn't be so full that we cannot find a value
+			 * after some iterations, so this maximum step size could be optimized
+			 * to fail earlier.
+			 */
+			HWFNC_ERR("Fence Table traversed and no available space!\n");
+			val = -EINVAL;
+		} else {
+			/*
+			 * Linearly increment the hash value to find next element in the table
+			 * note that this relies in the 'scrambled' data from the original hash
+			 * Also, add a mod division to wrap-around in case that we reached the
+			 * end of the table
+			 */
+			*hash = (*hash + 1) % m_size;
+		}
+	}
+
+	return val;
+}
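+
+/*
+ * Illustrative probe sequence, for a hypothetical table of m_size = 8 entries:
+ * step 0 computes the scrambled slot
+ *
+ *   h0 = (HW_FENCE_HASH_A_MULT * seqno * (2 * context - 1)
+ *         + HW_FENCE_HASH_C_MULT * context) % 8
+ *
+ * and each subsequent step is a linear probe h(n) = (h(n-1) + 1) % 8, so at
+ * most m_size slots are visited before the lookup gives up with -EINVAL.
+ */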
+
+static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries,
+	struct msm_hw_fence *hw_fences_tbl,
+	u64 hash)
+{
+	if (hash >= table_total_entries) {
+		HWFNC_ERR("hash:%llu out of max range:%llu\n",
+			hash, table_total_entries);
+		return NULL;
+	}
+
+	return &hw_fences_tbl[hash];
+}
+
+static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
+{
+	/* If valid is set, the hw fence is not free */
+	return hw_fence->valid ? false : true;
+}
+
+static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
+{
+	return ((hw_fence->ctx_id == context && hw_fence->seq_id == seqno) ? true : false);
+}
+
+/* clears everything but the 'valid' field */
+static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence)
+{
+	int i;
+
+	hw_fence->error = 0;
+	wmb(); /* update memory to avoid mem-abort */
+	hw_fence->ctx_id = 0;
+	hw_fence->seq_id = 0;
+	hw_fence->wait_client_mask = 0;
+	hw_fence->fence_allocator = 0;
+	hw_fence->fence_signal_client = 0;
+
+	hw_fence->flags = 0;
+
+	hw_fence->fence_create_time = 0;
+	hw_fence->fence_trigger_time = 0;
+	hw_fence->fence_wait_time = 0;
+	hw_fence->debug_refcount = 0;
+	hw_fence->parents_cnt = 0;
+	hw_fence->pending_child_cnt = 0;
+
+	for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++)
+		hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE;
+
+	memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data));
+}
+
+/* This function must be called with the hw fence lock */
+static void  _reserve_hw_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fence, u32 client_id,
+	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
+{
+	_cleanup_hw_fence(hw_fence);
+
+	/* reserve this HW fence */
+	hw_fence->valid = 1;
+
+	hw_fence->ctx_id = context;
+	hw_fence->seq_id = seqno;
+	hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */
+	hw_fence->fence_allocator = client_id;
+	hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
+	hw_fence->debug_refcount++;
+
+	HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%llu\n",
+		client_id, context, seqno, hash);
+}
+
+/* This function must be called with the hw fence lock */
+static void  _unreserve_hw_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fence, u32 client_id,
+	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
+{
+	_cleanup_hw_fence(hw_fence);
+
+	/* unreserve this HW fence */
+	hw_fence->valid = 0;
+
+	HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%llu\n",
+		client_id, context, seqno, hash);
+}
+
+/* This function must be called with the hw fence lock */
+static void  _reserve_join_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fence, u32 client_id, u64 context,
+	u64 seqno, u32 hash, u32 pending_child_cnt)
+{
+	_cleanup_hw_fence(hw_fence);
+
+	/* reserve this HW fence */
+	hw_fence->valid = true;
+
+	hw_fence->ctx_id = context;
+	hw_fence->seq_id = seqno;
+	hw_fence->fence_allocator = client_id;
+	hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
+	hw_fence->debug_refcount++;
+
+	hw_fence->pending_child_cnt = pending_child_cnt;
+
+	HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%llu\n",
+		client_id, context, seqno, hash);
+}
+
+/* This function must be called with the hw fence lock */
+static void  _fence_found(struct hw_fence_driver_data *drv_data,
+	 struct msm_hw_fence *hw_fence, u32 client_id,
+	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
+{
+	/*
+	 * Do nothing; when this find-fence function is invoked, all processing is
+	 * done outside. Currently this function is kept only for debugging purposes
+	 * and can be removed in final versions.
+	 */
+	HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%llu\n",
+		client_id, context, seqno, hash);
+}
+
+char *_get_op_mode(enum hw_fence_lookup_ops op_code)
+{
+	switch (op_code) {
+	case HW_FENCE_LOOKUP_OP_CREATE:
+		return "CREATE";
+	case HW_FENCE_LOOKUP_OP_DESTROY:
+		return "DESTROY";
+	case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
+		return "CREATE_JOIN";
+	case HW_FENCE_LOOKUP_OP_FIND_FENCE:
+		return "FIND_FENCE";
+	default:
+		return "UNKNOWN";
+	}
+
+	return "UNKNOWN";
+}
+
+struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id,
+	u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash)
+{
+	bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno);
+	void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence,
+			u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending);
+	struct msm_hw_fence *hw_fence = NULL;
+	u64 step = 0;
+	int ret = 0;
+	bool hw_fence_found = false;
+
+	if (!hash || !drv_data || !hw_fences_tbl) {
+		HWFNC_ERR("Invalid input for hw_fence_lookup\n");
+		return NULL;
+	}
+
+	*hash = ~0;
+
+	HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code);
+
+	switch (op_code) {
+	case HW_FENCE_LOOKUP_OP_CREATE:
+		compare_fnc = &_is_hw_fence_free;
+		process_fnc = &_reserve_hw_fence;
+		break;
+	case HW_FENCE_LOOKUP_OP_DESTROY:
+		compare_fnc = &_hw_fence_match;
+		process_fnc = &_unreserve_hw_fence;
+		break;
+	case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
+		compare_fnc = &_is_hw_fence_free;
+		process_fnc = &_reserve_join_fence;
+		break;
+	case HW_FENCE_LOOKUP_OP_FIND_FENCE:
+		compare_fnc = &_hw_fence_match;
+		process_fnc = &_fence_found;
+		break;
+	default:
+		HWFNC_ERR("Unknown op code:%d\n", op_code);
+		return NULL;
+	}
+
+	while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) {
+
+		/* Calculate the Hash for the Fence */
+		ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash);
+		if (ret) {
+			HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n",
+				context, seqno, *hash);
+			break;
+		}
+		HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context,
+			seqno);
+
+		/* Get element from the table using the hash */
+		hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash);
+		HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n",
+			hw_fences_tbl, hw_fence, *hash, hw_fence ? hw_fence->valid : 0xbad);
+		if (!hw_fence) {
+			HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n",
+				context, seqno, *hash);
+			break;
+		}
+
+		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1);
+
+		/* compare to either find a free fence or find an allocated fence */
+		if (compare_fnc(hw_fence, context, seqno)) {
+
+			/* Process the hw fence found by the algorithm */
+			if (process_fnc) {
+				process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash,
+					pending_child_cnt);
+
+				/* update memory table with processing */
+				wmb();
+			}
+
+			HWFNC_DBG_L("client_id:%lu op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n",
+				client_id, _get_op_mode(op_code), context, seqno, *hash, step);
+
+			hw_fence_found = true;
+		} else {
+			if ((op_code == HW_FENCE_LOOKUP_OP_CREATE ||
+				op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) &&
+				seqno == hw_fence->seq_id && context == hw_fence->ctx_id) {
+				/* ctx & seqno must be unique creating a hw-fence */
+				HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n",
+					context, seqno);
+				GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);
+				break;
+			}
+			/* compare can fail if we have a collision, we will linearly resolve it */
+			HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash,
+				context, seqno);
+		}
+
+		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);
+
+		/* Increment step for the next loop */
+		step++;
+	}
+
+	/* If we iterated through the whole list and didn't find the fence, return null */
+	if (!hw_fence_found) {
+		HWFNC_ERR("fail to create hw-fence step:%llu\n", step);
+		hw_fence = NULL;
+	}
+
+	HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n",
+		op_code, hw_fence, context, seqno, *hash, hw_fence ? hw_fence->flags : -1);
+
+	return hw_fence;
+}
+
+int hw_fence_create(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	u64 context, u64 seqno, u64 *hash)
+{
+	u32 client_id = hw_fence_client->client_id;
+	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
+
+	int ret = 0;
+
+	/* allocate hw fence in table */
+	if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
+		context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) {
+		HWFNC_ERR("Fail to create fence client:%lu ctx:%llu seqno:%llu\n",
+			client_id, context, seqno);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static  inline int _hw_fence_cleanup(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno) {
+	u64 hash;
+
+	if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
+			context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash))
+		return -EINVAL;
+
+	return 0;
+}
+
+int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	u64 context, u64 seqno)
+{
+	u32 client_id = hw_fence_client->client_id;
+	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
+	int ret = 0;
+
+	/* remove hw fence from table*/
+	if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) {
+		HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu\n",
+			client_id, context, seqno);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, u64 hash)
+{
+	u32 client_id = hw_fence_client->client_id;
+	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
+	struct msm_hw_fence *hw_fence = NULL;
+	int ret = 0;
+
+	hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash);
+	if (!hw_fence) {
+		HWFNC_ERR("bad hw fence hash:%llu client:%lu\n", hash, client_id);
+		return -EINVAL;
+	}
+
+	if (hw_fence->fence_allocator != client_id) {
+		HWFNC_ERR("client:%lu cannot destroy fence hash:%llu fence_allocator:%lu\n",
+			client_id, hash, hw_fence->fence_allocator);
+		return -EINVAL;
+	}
+
+	/* remove hw fence from table*/
+	if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id,
+			hw_fence->seq_id)) {
+		HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu hash:%llu\n",
+			client_id, hw_fence->ctx_id, hw_fence->seq_id, hash);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	struct dma_fence_array *array, u64 *hash, bool create)
+{
+	struct msm_hw_fence *hw_fences_tbl;
+	struct msm_hw_fence *join_fence = NULL;
+	u64 context, seqno;
+	u32 client_id, pending_child_cnt;
+
+	/*
+	 * NOTE: For now we are allocating the join fences from the same table as all
+	 * the other fences (i.e. drv_data->hw_fences_tbl). Functionally this will work;
+	 * however, it might impact the lookup algorithm, since the "join-fences" are
+	 * created with the context and seqno of a fence-array, and those might not be
+	 * changed by the client, so lookups will degrade to linear probing and very
+	 * likely impact the other fences if these join-fences start to fill up a
+	 * particular region of the fences global table. We might therefore have to
+	 * allocate a different table altogether for these join fences; to do this,
+	 * just alloc another table and change it here:
+	 */
+	hw_fences_tbl = drv_data->hw_fences_tbl;
+
+	context = array->base.context;
+	seqno = array->base.seqno;
+	pending_child_cnt = array->num_fences;
+	client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID;
+
+	if (create) {
+		/* allocate the fence */
+		join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
+			seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash);
+		if (!join_fence)
+			HWFNC_ERR("Fail to create join fence client:%lu ctx:%llu seqno:%llu\n",
+				client_id, context, seqno);
+	} else {
+		/* destroy the fence */
+		if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno))
+			HWFNC_ERR("Fail destroying join fence client:%lu ctx:%llu seqno:%llu\n",
+				client_id, context, seqno);
+	}
+
+	return join_fence;
+}
+
+struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	u64 context, u64 seqno, u64 *hash)
+{
+	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
+	struct msm_hw_fence *hw_fence;
+	u32 client_id = hw_fence_client ? hw_fence_client->client_id : 0xff;
+
+	/* find the hw fence */
+	hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
+		seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash);
+	if (!hw_fence)
+		HWFNC_ERR("Fail to find hw fence client:%lu ctx:%llu seqno:%llu\n",
+			client_id, context, seqno);
+
+	return hw_fence;
+}
+
+static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
+	u64 flags, u64 client_data, u32 error)
+{
+	u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */
+	u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */
+
+	HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash);
+
+	/* Call fence error callback */
+	if (error && hw_fence_client->fence_error_cb) {
+		hw_fence_utils_fence_error_cb(hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id,
+			hash, flags, error);
+	} else {
+		/* Write to Rx queue */
+		if (hw_fence_client->update_rxq)
+			hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id,
+				hw_fence->seq_id, hash, flags, client_data, error,
+				HW_FENCE_RX_QUEUE - 1);
+
+		/* Signal the hw fence now */
+		if (hw_fence_client->send_ipc)
+			hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id,
+				hw_fence_client->ipc_signal_id);
+	}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0
+			&& hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6)
+		process_validation_client_loopback(drv_data, hw_fence_client->client_id);
+#endif /* CONFIG_DEBUG_FS */
+}
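+
+/*
+ * Summary of the signaling path above: a fence error with a registered
+ * fence_error_cb is delivered through the callback only; otherwise the payload
+ * is queued in the client's rx ring (if update_rxq) and/or the ipcc signal is
+ * raised (if send_ipc). Validation clients additionally run the debugfs
+ * loopback after either path.
+ */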
+
+static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array,
+	struct msm_hw_fence *join_fence, u64 hash_join_fence)
+{
+	struct dma_fence *child_fence;
+	struct msm_hw_fence *hw_fence_child;
+	int idx, j;
+	u64 hash = 0;
+
+	if (!array->fences)
+		goto destroy_fence;
+
+	/* cleanup the child-fences from the parent join-fence */
+	for (idx = iteration; idx >= 0; idx--) {
+		child_fence = array->fences[idx];
+		if (!child_fence) {
+			HWFNC_ERR("invalid child fence idx:%d\n", idx);
+			continue;
+		}
+
+		hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
+			child_fence->seqno, &hash);
+		if (!hw_fence_child) {
+			HWFNC_ERR("Cannot cleanup child fence context:%lu seqno:%lu hash:%lu\n",
+				child_fence->context, child_fence->seqno, hash);
+
+			/*
+			 * ideally this should not have happened, but if it did, try to keep
+			 * cleaning-up other fences after printing the error
+			 */
+			continue;
+		}
+
+		/* lock the child while we clean it up from the parent join-fence */
+		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
+		for (j = hw_fence_child->parents_cnt; j > 0; j--) {
+
+			if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) {
+				HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n",
+					hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS);
+
+				j = MSM_HW_FENCE_MAX_JOIN_PARENTS;
+			}
+
+			if (hw_fence_child->parent_list[j - 1] == hash_join_fence) {
+				hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE;
+
+				if (hw_fence_child->parents_cnt)
+					hw_fence_child->parents_cnt--;
+
+				/* update memory for the table update */
+				wmb();
+			}
+		}
+		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
+	}
+
+destroy_fence:
+	/* destroy join fence */
+	_hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence,
+		false);
+}
+
+int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array,
+	u64 *hash_join_fence, u64 client_data)
+{
+	struct msm_hw_fence *join_fence;
+	struct msm_hw_fence *hw_fence_child;
+	struct dma_fence *child_fence;
+	bool signal_join_fence = false;
+	u64 hash;
+	int i, ret = 0;
+	enum hw_fence_client_data_id data_id;
+
+	if (client_data) {
+		data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
+		if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
+			HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n",
+				client_data, hw_fence_client->client_id_ext);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Create join fence from the join-fences table,
+	 * This function initializes:
+	 * join_fence->pending_child_count = array->num_fences
+	 */
+	join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array,
+		hash_join_fence, true);
+	if (!join_fence) {
+		HWFNC_ERR("cannot alloc hw fence for join fence array\n");
+		return -EINVAL;
+	}
+
+	/* update this as waiting client of the join-fence */
+	GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
+	join_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
+	GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */
+
+	/* Iterate through fences of the array */
+	for (i = 0; i < array->num_fences; i++) {
+		child_fence = array->fences[i];
+
+		/* Nested fence-arrays are not supported */
+		if (to_dma_fence_array(child_fence)) {
+			HWFNC_ERR("This is a nested fence, fail!\n");
+			ret = -EINVAL;
+			goto error_array;
+		}
+
+		/* All elements in the fence-array must be hw-fences */
+		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) {
+			HWFNC_ERR("DMA Fence in FenceArray is not a HW Fence\n");
+			ret = -EINVAL;
+			goto error_array;
+		}
+
+		/* Find the HW Fence in the Global Table */
+		hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
+			child_fence->seqno, &hash);
+		if (!hw_fence_child) {
+			HWFNC_ERR("Cannot find child fence context:%lu seqno:%lu hash:%lu\n",
+				child_fence->context, child_fence->seqno, hash);
+			ret = -EINVAL;
+			goto error_array;
+		}
+
+		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
+		if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
+
+			/* child fence is already signaled */
+			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
+			join_fence->error |= hw_fence_child->error;
+			if (--join_fence->pending_child_cnt == 0)
+				signal_join_fence = true;
+
+			/* update memory for the table update */
+			wmb();
+
+			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */
+		} else {
+
+			/* child fence is not signaled */
+			hw_fence_child->parents_cnt++;
+
+			if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS
+					|| hw_fence_child->parents_cnt < 1) {
+
+				/* Max number of parents for a fence is exceeded */
+				HWFNC_ERR("DMA Fence in FenceArray exceeds parents:%d\n",
+					hw_fence_child->parents_cnt);
+				hw_fence_child->parents_cnt--;
+
+				/* update memory for the table update */
+				wmb();
+
+				/* unlock */
+				GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0);
+				ret = -EINVAL;
+				goto error_array;
+			}
+
+			hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] =
+				*hash_join_fence;
+
+			/* update memory for the table update */
+			wmb();
+		}
+		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
+	}
+
+	if (client_data)
+		join_fence->client_data[data_id] = client_data;
+
+	/* all fences were signaled, signal client now */
+	if (signal_join_fence) {
+
+		/* signal the join hw fence */
+		_fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0,
+			client_data, join_fence->error);
+		set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags);
+
+		/*
+		 * job of the join-fence is finished since we already signaled,
+		 * we can delete it now. This can happen when all the fences that
+		 * are part of the join-fence are already signaled.
+		 */
+		_hw_fence_process_join_fence(drv_data, hw_fence_client, array, hash_join_fence,
+			false);
+	} else if (!array->num_fences) {
+		/*
+		 * if we didn't signal the join-fence and the number of fences is not set in
+		 * the fence-array, then fail here, otherwise driver would create a join-fence
+		 * with no-childs that won't be signaled at all or an incomplete join-fence
+		 */
+		HWFNC_ERR("invalid fence-array ctx:%llu seqno:%llu without fences\n",
+			array->base.context, array->base.seqno);
+		goto error_array;
+	}
+
+	return ret;
+
+error_array:
+	_cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence,
+		*hash_join_fence);
+
+	return -EINVAL;
+}
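
A minimal caller sketch for the fence-array path above (a sketch only, assuming the driver's internal headers are available; the example_* wrapper is hypothetical and not part of this patch):

static int example_wait_on_array(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *client, struct dma_fence_array *array)
{
	u64 hash_join_fence;

	/*
	 * creates the join-fence and registers it as a parent of each child
	 * hw-fence; fails if any child is nested or is not a hw-fence
	 */
	return hw_fence_process_fence_array(drv_data, client, array,
		&hash_join_fence, 0 /* no client_data */);
}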
+
+int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
+		struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
+		u64 seqno, u64 *hash, u64 client_data)
+{
+	struct msm_hw_fence *hw_fence;
+	enum hw_fence_client_data_id data_id;
+
+	if (client_data) {
+		data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
+		if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
+			HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n",
+				client_data, hw_fence_client->client_id_ext);
+			return -EINVAL;
+		}
+	}
+
+	/* find the hw fence within the table */
+	hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash);
+	if (!hw_fence) {
+		HWFNC_ERR("Cannot find fence!\n");
+		return -EINVAL;
+	}
+
+	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */
+
+	/* register client in the hw fence */
+	hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
+	hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data);
+	hw_fence->debug_refcount++;
+	if (client_data)
+		hw_fence->client_data[data_id] = client_data;
+
+	/* update memory for the table update */
+	wmb();
+
+	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */
+
+	/* if hw fence already signaled, signal the client */
+	if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
+		if (fence != NULL)
+			set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+		_fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, 0);
+	}
+
+	return 0;
+}
+
+int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client,
+	struct dma_fence *fence, u64 *hash, u64 client_data)
+{
+	int ret = 0;
+
+	if (!drv_data || !hw_fence_client || !fence) {
+		HWFNC_ERR("Invalid Input!\n");
+		return -EINVAL;
+	}
+	/* fence must be hw-fence */
+	if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
+		HWFNC_ERR("DMA fence is not a HW fence, flags:0x%lx\n", fence->flags);
+		return -EINVAL;
+	}
+
+	ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context,
+		fence->seqno, hash, client_data);
+	if (ret)
+		HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id);
+
+	return ret;
+}
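
The single-fence entry point above reduces to a find-and-register; a hedged usage sketch under the same assumptions (example_* is hypothetical):

static int example_wait_on_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *client, struct dma_fence *fence)
{
	u64 hash;

	/*
	 * fails unless MSM_HW_FENCE_FLAG_ENABLED_BIT is set on the dma_fence;
	 * if the hw-fence already signaled, the client is signaled immediately
	 */
	return hw_fence_process_fence(drv_data, client, fence, &hash, 0);
}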
+
+static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fence, u64 hash, int error)
+{
+	enum hw_fence_client_id wait_client_id;
+	enum hw_fence_client_data_id data_id;
+	struct msm_hw_fence_client *hw_fence_wait_client;
+	u64 client_data = 0;
+
+	/* signal with an error all the waiting clients for this fence */
+	for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) {
+		if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
+			hw_fence_wait_client = drv_data->clients[wait_client_id];
+
+			if (!hw_fence_wait_client)
+				continue;
+
+			data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext);
+
+			if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA)
+				client_data = hw_fence->client_data[data_id];
+
+			_fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
+				hash, 0, client_data, error);
+		}
+	}
+}
+
+void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client)
+{
+	struct msm_hw_fence_hfi_queue_header *hfi_header;
+	struct msm_hw_fence_queue *queue;
+	u32 rd_idx, wr_idx, lock_idx;
+
+	queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1];
+	hfi_header = queue->va_header;
+
+	/* For the client TxQ: set the read-index same as last write that was done by the client */
+	mb(); /* make sure data is ready before read */
+	wr_idx = readl_relaxed(&hfi_header->write_index);
+	if (queue->skip_wr_idx)
+		hfi_header->tx_wm = wr_idx;
+	writel_relaxed(wr_idx, &hfi_header->read_index);
+	wmb(); /* make sure data is updated after writing the index */
+	HWFNC_DBG_Q("update tx queue %s to match write_index:%u\n",
+		queue->skip_wr_idx ? "read_index=tx_wm" : "read_index", wr_idx);
+
+	/* For the client RxQ: set the write-index same as last read done by the client */
+	if (hw_fence_client->update_rxq) {
+		lock_idx = hw_fence_client->client_id - 1;
+
+		if (lock_idx >= drv_data->client_lock_tbl_cnt) {
+			HWFNC_ERR("cannot reset rxq, lock for client id:%d exceed max:%d\n",
+				hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
+			return;
+		}
+		HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);
+
+		/* lock the client rx queue to update */
+		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1);
+
+		queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1];
+		hfi_header = queue->va_header;
+
+		mb(); /* make sure data is ready before read */
+		rd_idx = readl_relaxed(&hfi_header->read_index);
+		writel_relaxed(rd_idx, &hfi_header->write_index);
+		wmb(); /* make sure data is updated after writing the index */
+
+		/* unlock */
+		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0);
+		HWFNC_DBG_Q("update rx queue write_index to match read_index:%u\n", rd_idx);
+	}
+}
+
+int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
+	u32 reset_flags)
+{
+	int ret = 0;
+	int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET;
+
+	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */
+	if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) {
+		HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%llu seqno:%llu\n",
+			hw_fence_client->client_id, hw_fence->ctx_id,
+			hw_fence->seq_id);
+		hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id);
+
+		/* update memory for the table update */
+		wmb();
+	}
+	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */
+
+	if (hw_fence->fence_allocator == hw_fence_client->client_id) {
+
+		/* if fence is not signaled, signal with error all the waiting clients */
+		if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL))
+			_signal_all_wait_clients(drv_data, hw_fence, hash, error);
+
+		if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)
+			goto skip_destroy;
+
+		ret = hw_fence_destroy(drv_data, hw_fence_client,
+			hw_fence->ctx_id, hw_fence->seq_id);
+		if (ret) {
+			HWFNC_ERR("Error destroying HW fence: ctx:%llu seqno:%llu\n",
+				hw_fence->ctx_id, hw_fence->seq_id);
+		}
+	}
+
+skip_destroy:
+	return ret;
+}
+
+enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id)
+{
+	enum hw_fence_client_data_id data_id;
+
+	switch (client_id) {
+	case HW_FENCE_CLIENT_ID_CTX0:
+		data_id = HW_FENCE_CLIENT_DATA_ID_CTX0;
+		break;
+	case HW_FENCE_CLIENT_ID_VAL0:
+		data_id = HW_FENCE_CLIENT_DATA_ID_VAL0;
+		break;
+	case HW_FENCE_CLIENT_ID_VAL1:
+		data_id = HW_FENCE_CLIENT_DATA_ID_VAL1;
+		break;
+	case HW_FENCE_CLIENT_ID_IPE:
+		data_id = HW_FENCE_CLIENT_DATA_ID_IPE;
+		break;
+	case HW_FENCE_CLIENT_ID_VPU:
+		data_id = HW_FENCE_CLIENT_DATA_ID_VPU;
+		break;
+	default:
+		data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA;
+		break;
+	}
+
+	return data_id;
+}

+ 1104 - 0
qcom/opensource/mm-drivers/hw_fence/src/hw_fence_drv_utils.c

@@ -0,0 +1,1104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/gunyah/gh_rm_drv.h>
+#include <linux/gunyah/gh_dbl.h>
+#include <linux/qcom_scm.h>
+#include <linux/version.h>
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+#include <linux/gh_cpusys_vm_mem_access.h>
+#endif
+#include <soc/qcom/secure_buffer.h>
+
+#include "hw_fence_drv_priv.h"
+#include "hw_fence_drv_utils.h"
+#include "hw_fence_drv_ipc.h"
+#include "hw_fence_drv_debug.h"
+
+/**
+ * MAX_CLIENT_QUEUE_MEM_SIZE:
+ * Maximum memory size for client queues of a hw fence client.
+ */
+#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000
+
+/**
+ * HW_FENCE_MAX_CLIENT_TYPE:
+ * Total number of client types with and without configurable number of sub-clients
+ */
+#define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \
+	HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)
+
+/**
+ * HW_FENCE_MIN_RXQ_CLIENTS:
+ * Minimum number of static hw fence clients with rxq
+ */
+#define HW_FENCE_MIN_RXQ_CLIENTS HW_FENCE_CLIENT_ID_VAL6
+
+/**
+ * HW_FENCE_MIN_RXQ_CLIENT_TYPE:
+ * Minimum number of static hw fence client types with rxq (GFX, DPU, VAL)
+ */
+#define HW_FENCE_MIN_RXQ_CLIENT_TYPE 3
+
+/* Maximum number of clients for each client type */
+#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1
+#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6
+#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7
+#define HW_FENCE_CLIENT_TYPE_MAX_IPE 32
+#define HW_FENCE_CLIENT_TYPE_MAX_VPU 32
+#define HW_FENCE_CLIENT_TYPE_MAX_IFE 32
+
+/**
+ * HW_FENCE_CTRL_QUEUE_DOORBELL:
+ * Bit set in doorbell flags mask if hw fence driver should read ctrl rx queue
+ */
+#define HW_FENCE_CTRL_QUEUE_DOORBELL 0
+
+/**
+ * HW_FENCE_DOORBELL_FLAGS_ID_LAST:
+ * Last doorbell flags id for which HW Fence Driver can receive doorbell
+ */
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CLIENT_ID_VAL6
+#else
+#define HW_FENCE_DOORBELL_FLAGS_ID_LAST HW_FENCE_CTRL_QUEUE_DOORBELL
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * HW_FENCE_DOORBELL_MASK:
+ * Each bit in this mask represents a possible doorbell flag id for which the hw fence driver can receive a doorbell
+ */
+#define HW_FENCE_DOORBELL_MASK \
+	GENMASK(HW_FENCE_DOORBELL_FLAGS_ID_LAST, HW_FENCE_CTRL_QUEUE_DOORBELL)
+
+/**
+ * HW_FENCE_MAX_ITER_READ:
+ * Maximum number of iterations when reading queue
+ */
+#define HW_FENCE_MAX_ITER_READ 100
+
+/**
+ * HW_FENCE_MAX_EVENTS:
+ * Maximum number of HW Fence debug events
+ */
+#define HW_FENCE_MAX_EVENTS 1000
+
+/**
+ * struct hw_fence_client_types - Table describing all supported client types, used to parse
+ *                                device-tree properties related to client queue size.
+ *
+ * The fields name, init_id, and max_clients_num are constants. Default values for clients_num,
+ * queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num,
+ * queue_entries, and skip_txq_wr_idx can be read from device-tree.
+ *
+ * If a value for queue entries is not parsed for the client type, then the default number of client
+ * queue entries (parsed from device-tree) is used.
+ *
+ * Notes:
+ * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'.
+ * 2. Each HW Fence client ID must be described by one of the client types in this table.
+ * 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and
+ *    skip_txq_wr_idx.
+ * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must
+ *    be incremented as appropriate for new client types.
+ */
+struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
+	{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
+		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
+	{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
+		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
+	{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
+		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
+	{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES,
+		0, 0, 0, 0, 0, 0, false},
+	{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES,
+		0, 0, 0, 0, 0, 0, false},
+	{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+};
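
As a reading aid for the positional initializers above, the "dpu" row expands roughly as follows; the field order is inferred from how the descriptor is used later in this file, so treat it as illustrative:

/* equivalent "dpu" entry with designated initializers (illustrative) */
{
	.name            = "dpu",
	.init_id         = HW_FENCE_CLIENT_ID_CTL0,
	.max_clients_num = HW_FENCE_CLIENT_TYPE_MAX_DPU,
	.clients_num     = HW_FENCE_CLIENT_TYPE_MAX_DPU,
	.queues_num      = HW_FENCE_CLIENT_QUEUES,
	/*
	 * queue_entries, mem_size, start_padding, end_padding, txq_idx_start
	 * and txq_idx_factor are left 0 here and filled in at runtime by
	 * _parse_client_queue_dt_props_indv()
	 */
	.skip_txq_wr_idx = false,
},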
+
+static void _lock(uint64_t *wait)
+{
+#if defined(__aarch64__)
+	__asm__(
+		// Sequence to wait for lock to be free (i.e. zero)
+		"PRFM PSTL1KEEP, [%x[i_lock]]\n\t"
+		"1:\n\t"
+		"LDAXR W5, [%x[i_lock]]\n\t"
+		"CBNZ W5, 1b\n\t"
+		// Sequence to set PVM BIT0
+		"LDR W7, =0x1\n\t"              // Load BIT0 (0x1) into W7
+		"STXR W5, W7, [%x[i_lock]]\n\t" // Atomic Store exclusive BIT0 (lock = 0x1)
+		"CBNZ W5, 1b\n\t"               // If cannot set it, goto 1
+		:
+		: [i_lock] "r" (wait)
+		: "memory");
+#endif
+}
+
+static void _unlock(struct hw_fence_driver_data *drv_data, uint64_t *lock)
+{
+	uint64_t lock_val;
+
+#if defined(__aarch64__)
+	__asm__(
+		// Sequence to clear PVM BIT0
+		"2:\n\t"
+		"LDAXR W5, [%x[i_out]]\n\t"             // Atomic Fetch Lock
+		"AND W6, W5, #0xFFFFFFFFFFFFFFFE\n\t"   // AND to clear BIT0 (lock &= ~0x1)
+		"STXR W5, W6, [%x[i_out]]\n\t"          // Store exclusive result
+		"CBNZ W5, 2b\n\t"                       // If cannot store exclusive, goto 2
+		:
+		: [i_out] "r" (lock)
+		: "memory");
+#endif
+	mb(); /* Make sure the memory is updated */
+
+	lock_val = *lock; /* Read the lock value */
+	HWFNC_DBG_LOCK("unlock: lock_val after:0x%llx\n", lock_val);
+	if (lock_val & 0x2) { /* check if SVM BIT1 is set */
+		/*
+		 * SVM is in WFI state, since SVM acquire bit is set
+		 * Trigger IRQ to Wake-Up SVM Client
+		 */
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+		drv_data->debugfs_data.lock_wake_cnt++;
+		HWFNC_DBG_LOCK("triggering ipc to unblock SVM lock_val:%llu cnt:%llu\n", lock_val,
+			drv_data->debugfs_data.lock_wake_cnt);
+#endif
+		hw_fence_ipcc_trigger_signal(drv_data,
+			drv_data->ipcc_client_pid,
+			drv_data->ipcc_client_vid, 30); /* Trigger APPS Signal 30 */
+	}
+}
+
+void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val)
+{
+	if (val) {
+		preempt_disable();
+		_lock(lock);
+	} else {
+		_unlock(drv_data, lock);
+		preempt_enable();
+	}
+}
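
The helpers above back the GLOBAL_ATOMIC_STORE() macro used throughout this patch; the intended critical-section pattern is sketched below (a sketch only, assuming the driver's internal headers; example_* is hypothetical):

static void example_locked_table_update(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence)
{
	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock: spins with preemption off */
	hw_fence->debug_refcount++; /* any update to the shared fence table goes here */
	wmb(); /* publish the table update before dropping the lock */
	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock: may kick SVM out of WFI */
}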
+
+int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id,
+	u64 seqno, u64 hash, u64 flags, u32 error)
+{
+	struct msm_hw_fence_cb_data cb_data;
+	struct dma_fence fence;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(hw_fence_client)) {
+		HWFNC_ERR("Invalid client:0x%pK\n", hw_fence_client);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_fence_client->error_cb_lock);
+	if (!error || !hw_fence_client->fence_error_cb) {
+		HWFNC_ERR("Invalid error:%d fence_error_cb:0x%pK\n", error,
+			hw_fence_client->fence_error_cb);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* initialize cb_data info */
+	fence.context = ctxt_id;
+	fence.seqno = seqno;
+	fence.flags = flags;
+	fence.error = error;
+	cb_data.fence = &fence;
+	cb_data.data = hw_fence_client->fence_error_cb_userdata;
+
+	HWFNC_DBG_L("invoking cb for client:%d ctx:%llu seq:%llu flags:%llu e:%u data:0x%pK\n",
+		hw_fence_client->client_id, ctxt_id, seqno, flags, error,
+		hw_fence_client->fence_error_cb_userdata);
+
+	hw_fence_client->fence_error_cb(hash, error, &cb_data);
+
+exit:
+	mutex_unlock(&hw_fence_client->error_cb_lock);
+
+	return ret;
+}
+
+static int _process_fence_error_client_loopback(struct hw_fence_driver_data *drv_data,
+	int db_flag_id)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct msm_hw_fence_queue_payload payload;
+	int i, cb_ret, ret = 0, read = 1;
+	u32 client_id;
+
+	for (i = 0; read && i < HW_FENCE_MAX_ITER_READ; i++) {
+		read = hw_fence_read_queue_helper(&drv_data->ctrl_queues[HW_FENCE_RX_QUEUE - 1],
+			&payload);
+		if (read < 0) {
+			HWFNC_DBG_Q("unable to read ctrl rxq for db_flag_id:%d\n", db_flag_id);
+			return read;
+		}
+		if (payload.type != HW_FENCE_PAYLOAD_TYPE_2) {
+			HWFNC_ERR("unsupported payload type in ctrl rxq received:%u expected:%u\n",
+				payload.type, HW_FENCE_PAYLOAD_TYPE_2);
+			ret = -EINVAL;
+			continue;
+		}
+		if (payload.client_data < HW_FENCE_CLIENT_ID_CTX0 ||
+				payload.client_data >= drv_data->clients_num) {
+			HWFNC_ERR("read invalid client_id:%llu from ctrl rxq min:%u max:%u\n",
+				payload.client_data, HW_FENCE_CLIENT_ID_CTX0,
+				drv_data->clients_num);
+			ret = -EINVAL;
+			continue;
+		}
+
+		client_id = payload.client_data;
+		HWFNC_DBG_Q("ctrl rxq rd: it:%d h:%llu ctx:%llu seq:%llu f:%llu e:%u client:%u\n",
+			i, payload.hash, payload.ctxt_id, payload.seqno, payload.flags,
+			payload.error, client_id);
+
+		hw_fence_client = drv_data->clients[client_id];
+		if (!hw_fence_client) {
+			HWFNC_ERR("processing fence error cb for unregistered client_id:%u\n",
+				client_id);
+			ret = -EINVAL;
+			continue;
+		}
+
+		cb_ret = hw_fence_utils_fence_error_cb(hw_fence_client, payload.ctxt_id,
+			payload.seqno, payload.hash, payload.flags, payload.error);
+		if (cb_ret) {
+			HWFNC_ERR("fence_error_cb failed for client:%u ctx:%llu seq:%llu err:%u\n",
+				client_id, payload.ctxt_id, payload.seqno, payload.error);
+			ret = cb_ret;
+		}
+	}
+
+	return ret;
+}
+
+static int _process_doorbell_id(struct hw_fence_driver_data *drv_data, int db_flag_id)
+{
+	int ret;
+
+	HWFNC_DBG_H("Processing doorbell mask id:%d\n", db_flag_id);
+	switch (db_flag_id) {
+	case HW_FENCE_CTRL_QUEUE_DOORBELL:
+		ret = _process_fence_error_client_loopback(drv_data, db_flag_id);
+		break;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	case HW_FENCE_CLIENT_ID_VAL0:
+	case HW_FENCE_CLIENT_ID_VAL1:
+	case HW_FENCE_CLIENT_ID_VAL2:
+	case HW_FENCE_CLIENT_ID_VAL3:
+	case HW_FENCE_CLIENT_ID_VAL4:
+	case HW_FENCE_CLIENT_ID_VAL5:
+	case HW_FENCE_CLIENT_ID_VAL6:
+		ret = process_validation_client_loopback(drv_data, db_flag_id);
+		break;
+#endif /* CONFIG_DEBUG_FS */
+	default:
+		HWFNC_ERR("unknown mask id:%d\n", db_flag_id);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags)
+{
+	int db_flag_id = HW_FENCE_CTRL_QUEUE_DOORBELL;
+	u64 mask;
+
+	for (; db_flag_id <= HW_FENCE_DOORBELL_FLAGS_ID_LAST; db_flag_id++) {
+		mask = 1ULL << db_flag_id;
+		if (mask & db_flags) {
+			HWFNC_DBG_H("db_flag:%d signaled! flags:0x%llx\n", db_flag_id, db_flags);
+
+			if (_process_doorbell_id(drv_data, db_flag_id))
+				HWFNC_ERR("Failed to process db_flag_id:%d\n", db_flag_id);
+
+			/* clear mask for this flag id if nothing else pending finish */
+			db_flags = db_flags & ~(mask);
+			HWFNC_DBG_H("db_flag_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n",
+				db_flag_id, db_flags, mask, ~(mask));
+			if (!db_flags)
+				break;
+		}
+	}
+}
+
+/* doorbell callback */
+static void _hw_fence_cb(int irq, void *data)
+{
+	struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data;
+	gh_dbl_flags_t clear_flags = HW_FENCE_DOORBELL_MASK;
+	int ret;
+
+	if (!drv_data)
+		return;
+
+	ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0);
+	if (ret) {
+		HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret);
+		return;
+	}
+
+	HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label,
+		irq, clear_flags, hw_fence_get_qtime(drv_data));
+
+	hw_fence_utils_process_doorbell_mask(drv_data, clear_flags);
+}
+
+int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data)
+{
+	struct device_node *node = drv_data->dev->of_node;
+	struct device_node *node_compat;
+	const char *compat = "qcom,msm-hw-fence-db";
+	int ret;
+
+	node_compat = of_find_compatible_node(node, NULL, compat);
+	if (!node_compat) {
+		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label);
+	if (ret) {
+		HWFNC_ERR("failed to find label info %d\n", ret);
+		return ret;
+	}
+
+	HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label);
+	drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data);
+	if (IS_ERR_OR_NULL(drv_data->rx_dbl)) {
+		ret = PTR_ERR(drv_data->rx_dbl);
+		HWFNC_ERR("Failed to register doorbell\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
+				gh_vmid_t self, gh_vmid_t peer)
+{
+	struct qcom_scm_vmperm src_vmlist[] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}};
+	struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE},
+					       {peer, PERM_READ | PERM_WRITE}};
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
+	u64 srcvmids, dstvmids;
+#else
+	unsigned int srcvmids, dstvmids;
+#endif
+	struct gh_acl_desc *acl;
+	struct gh_sgl_desc *sgl;
+	int ret;
+
+	srcvmids = BIT(src_vmlist[0].vmid);
+	dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid);
+	ret = qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res), &srcvmids,
+			dst_vmlist, ARRAY_SIZE(dst_vmlist));
+	if (ret) {
+		HWFNC_ERR("%s: qcom_scm_assign_mem failed addr=%pa size=%u err=%d\n",
+			__func__, &drv_data->res.start, drv_data->size, ret);
+		return ret;
+	}
+
+	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
+	if (!acl)
+		return -ENOMEM;
+	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
+	if (!sgl) {
+		kfree(acl);
+		return -ENOMEM;
+	}
+	acl->n_acl_entries = 2;
+	acl->acl_entries[0].vmid = (u16)self;
+	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
+	acl->acl_entries[1].vmid = (u16)peer;
+	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;
+
+	sgl->n_sgl_entries = 1;
+	sgl->sgl_entries[0].ipa_base = drv_data->res.start;
+	sgl->sgl_entries[0].size = resource_size(&drv_data->res);
+
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
+			acl, sgl, NULL, &drv_data->memparcel);
+#else
+	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
+			acl, sgl, NULL, &drv_data->memparcel);
+#endif
+	if (ret) {
+		HWFNC_ERR("%s: gh_rm_mem_share failed addr=%pa size=%u err=%d\n",
+			__func__, &drv_data->res.start, drv_data->size, ret);
+		/* Attempt to give resource back to HLOS */
+		qcom_scm_assign_mem(drv_data->res.start, resource_size(&drv_data->res),
+				&dstvmids, src_vmlist, ARRAY_SIZE(src_vmlist));
+		ret = -EPROBE_DEFER;
+	}
+
+	kfree(acl);
+	kfree(sgl);
+
+	return ret;
+}
+
+static int _is_mem_shared(struct resource *res)
+{
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	return gh_cpusys_vm_get_share_mem_info(res);
+#else
+	return -EINVAL;
+#endif
+}
+
+static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
+{
+	struct gh_rm_notif_vm_status_payload *vm_status_payload;
+	struct hw_fence_driver_data *drv_data;
+	struct resource res;
+	gh_vmid_t peer_vmid;
+	gh_vmid_t self_vmid;
+	int ret;
+
+	drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb);
+
+	HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd);
+	if (cmd != GH_RM_NOTIF_VM_STATUS)
+		goto end;
+
+	vm_status_payload = data;
+	HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status);
+	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
+	    vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
+		goto end;
+
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	if (ghd_rm_get_vmid(drv_data->peer_name, &peer_vmid))
+		goto end;
+
+	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+		goto end;
+#else
+	if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid))
+		goto end;
+
+	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+		goto end;
+#endif
+
+	if (peer_vmid != vm_status_payload->vmid)
+		goto end;
+
+	switch (vm_status_payload->vm_status) {
+	case GH_RM_VM_STATUS_READY:
+		ret = _is_mem_shared(&res);
+		if (ret) {
+			HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret);
+			if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
+				HWFNC_ERR("failed to share memory\n");
+			else
+				drv_data->vm_ready = true;
+		} else {
+			if (drv_data->res.start == res.start &&
+					resource_size(&drv_data->res) == resource_size(&res)) {
+				drv_data->vm_ready = true;
+				HWFNC_DBG_INIT("mem_ready: addr:%pa size:%llu ret:%d\n", &res.start,
+					(u64)resource_size(&res), ret);
+			} else {
+				HWFNC_ERR("mem-shared mismatch:[%pa,%llu] expected:[%pa,%llu]\n",
+					&res.start, (u64)resource_size(&res), &drv_data->res.start,
+					(u64)resource_size(&drv_data->res));
+			}
+		}
+		break;
+	case GH_RM_VM_STATUS_RESET:
+		HWFNC_DBG_INIT("reset\n");
+		break;
+	}
+
+end:
+	return NOTIFY_DONE;
+}
+
+/* Allocates carved-out mapped memory */
+int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data)
+{
+	struct device_node *node = drv_data->dev->of_node;
+	struct device_node *node_compat;
+	const char *compat = "qcom,msm-hw-fence-mem";
+	struct device *dev = drv_data->dev;
+	struct device_node *np;
+	int notifier_ret, ret;
+
+	node_compat = of_find_compatible_node(node, NULL, compat);
+	if (!node_compat) {
+		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label);
+	if (ret) {
+		HWFNC_ERR("failed to find label info %d\n", ret);
+		return ret;
+	}
+
+	np = of_parse_phandle(node_compat, "shared-buffer", 0);
+	if (!np) {
+		HWFNC_ERR("failed to read shared-buffer info\n");
+		return -ENOMEM;
+	}
+
+	ret = of_address_to_resource(np, 0, &drv_data->res);
+	of_node_put(np);
+	if (ret) {
+		HWFNC_ERR("of_address_to_resource failed %d\n", ret);
+		return -EINVAL;
+	}
+
+	drv_data->io_mem_base = devm_ioremap_wc(dev, drv_data->res.start,
+		resource_size(&drv_data->res));
+	if (!drv_data->io_mem_base) {
+		HWFNC_ERR("ioremap failed!\n");
+		return -ENXIO;
+	}
+	drv_data->size = resource_size(&drv_data->res);
+	if (drv_data->size < drv_data->used_mem_size) {
+		HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n",
+			drv_data->size, drv_data->used_mem_size);
+		return -ENOMEM;
+	}
+
+	HWFNC_DBG_INIT("io_mem_base:0x%pK start:%pa end:%pa size:0x%x name:%s\n",
+		drv_data->io_mem_base, &drv_data->res.start,
+		&drv_data->res.end, drv_data->size, drv_data->res.name);
+
+	memset_io(drv_data->io_mem_base, 0x0, drv_data->size);
+
+	/* Register memory with HYP */
+	ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name);
+	if (ret)
+		drv_data->peer_name = GH_SELF_VM;
+
+	drv_data->rm_nb.notifier_call = hw_fence_rm_cb;
+	drv_data->rm_nb.priority = INT_MAX;
+	notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb);
+	HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret,
+		drv_data->peer_name, notifier_ret);
+	if (notifier_ret) {
+		HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret);
+		return -EPROBE_DEFER;
+	}
+
+	return 0;
+}
+
+char *_get_mem_reserve_type(enum hw_fence_mem_reserve type)
+{
+	switch (type) {
+	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
+		return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE";
+	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
+		return "HW_FENCE_MEM_RESERVE_LOCKS_REGION";
+	case HW_FENCE_MEM_RESERVE_TABLE:
+		return "HW_FENCE_MEM_RESERVE_TABLE";
+	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
+		return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE";
+	case HW_FENCE_MEM_RESERVE_EVENTS_BUFF:
+		return "HW_FENCE_MEM_RESERVE_EVENTS_BUFF";
+	}
+
+	return "Unknown";
+}
+
+/* Calculates the memory range for each of the elements in the carved-out memory */
+int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
+	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id)
+{
+	int ret = 0;
+	u32 start_offset = 0;
+	u32 remaining_size_bytes;
+	u32 total_events;
+
+	switch (type) {
+	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
+		start_offset = 0;
+		*size = drv_data->hw_fence_mem_ctrl_queues_size;
+		break;
+	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
+		/* Locks region starts at the end of the ctrl queues */
+		start_offset = drv_data->hw_fence_mem_ctrl_queues_size;
+		*size = HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
+		break;
+	case HW_FENCE_MEM_RESERVE_TABLE:
+		/* HW Fence table starts at the end of the Locks region */
+		start_offset = drv_data->hw_fence_mem_ctrl_queues_size +
+			HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
+		*size = drv_data->hw_fence_mem_fences_table_size;
+		break;
+	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
+		if (client_id >= drv_data->clients_num ||
+				!drv_data->hw_fence_client_queue_size[client_id].type) {
+			HWFNC_ERR("unexpected client_id:%d for clients_num:%u\n", client_id,
+				drv_data->clients_num);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset;
+		*size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size;
+		break;
+	case HW_FENCE_MEM_RESERVE_EVENTS_BUFF:
+		start_offset = drv_data->used_mem_size;
+		remaining_size_bytes = drv_data->size - start_offset;
+		if (start_offset >= drv_data->size ||
+				remaining_size_bytes < sizeof(struct msm_hw_fence_event)) {
+			HWFNC_DBG_INFO("no space for events total_sz:%u offset:%u evt_sz:%zu\n",
+				drv_data->size, start_offset, sizeof(struct msm_hw_fence_event));
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		total_events = remaining_size_bytes / sizeof(struct msm_hw_fence_event);
+		if (total_events > HW_FENCE_MAX_EVENTS)
+			total_events = HW_FENCE_MAX_EVENTS;
+		*size = total_events * sizeof(struct msm_hw_fence_event);
+		break;
+	default:
+		HWFNC_ERR("Invalid mem reserve type:%d\n", type);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (start_offset + *size > drv_data->size) {
+		HWFNC_ERR("reservation request:%u exceeds total size:%u\n",
+			start_offset + *size, drv_data->size);
+		return -ENOMEM;
+	}
+
+	HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%pK start:%pa start_offset:%u size:0x%x\n",
+		_get_mem_reserve_type(type), type, drv_data->io_mem_base, &drv_data->res.start,
+		start_offset, *size);
+
+	*phys = drv_data->res.start + (phys_addr_t)start_offset;
+	*pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */
+	HWFNC_DBG_H("phys:%pa pa:0x%pK\n", phys, *pa);
+
+exit:
+	return ret;
+}
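
The fixed regions above are laid out back-to-back in the carve-out; below is a self-contained worked example of the offsets, with illustrative sizes (the real values come from device-tree, not these constants):

#include <stdio.h>

int main(void)
{
	/* all sizes are assumptions for illustration only */
	unsigned int ctrl_queues_size = 0x2000;  /* hw_fence_mem_ctrl_queues_size */
	unsigned int locks_size       = 0x1000;  /* HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) */
	unsigned int table_size       = 0x40000; /* hw_fence_mem_fences_table_size */

	printf("ctrl queues   @ 0x%x\n", 0u);
	printf("locks region  @ 0x%x\n", ctrl_queues_size);
	printf("fence table   @ 0x%x\n", ctrl_queues_size + locks_size);
	printf("client queues @ 0x%x (PAGE_ALIGNed in the driver)\n",
		ctrl_queues_size + locks_size + table_size);
	return 0;
}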
+
+static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data,
+	struct hw_fence_client_type_desc *desc)
+{
+	u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32);
+	char name[40];
+	u32 tmp[4];
+	bool idx_by_payload = false;
+	int count, ret;
+
+	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name);
+
+	/* check if property is present */
+	ret = of_property_read_bool(drv_data->dev->of_node, name);
+	if (!ret)
+		return 0;
+
+	count = of_property_count_u32_elems(drv_data->dev->of_node, name);
+	if (count <= 0 || count > 4) {
+		HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count);
+	if (ret) {
+		HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name,
+			ret, count);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	desc->start_padding = tmp[0];
+	if (count >= 2)
+		desc->end_padding = tmp[1];
+	if (count >= 3)
+		desc->txq_idx_start = tmp[2];
+	if (count >= 4) {
+		if (tmp[3] > 1) {
+			HWFNC_ERR("%s invalid txq_idx_by_payload prop:%u\n", desc->name, tmp[3]);
+			ret = -EINVAL;
+			goto exit;
+		}
+		idx_by_payload = tmp[3];
+		desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1;
+	}
+
+	if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) ||
+			(desc->start_padding + desc->end_padding) % sizeof(u64)) {
+		HWFNC_ERR("%s start_padding:%u end_padding:%u violates mem alignment\n",
+			desc->name, desc->start_padding, desc->end_padding);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) {
+		HWFNC_ERR("%s client queues_num:%u start_padding:%u will overflow mem_size\n",
+			desc->name, desc->queues_num, desc->start_padding);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) -
+			desc->start_padding) {
+		HWFNC_ERR("%s client q_num:%u start_p:%u end_p:%u will overflow mem_size\n",
+			desc->name, desc->queues_num, desc->start_padding, desc->end_padding);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	max_idx_from_zero = idx_by_payload ? desc->queue_entries :
+		desc->queue_entries * payload_size_u32;
+	if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) {
+		HWFNC_ERR("%s txq_idx start:%u by_payload:%s q_entries:%d will overflow txq_idx\n",
+			desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false",
+			desc->queue_entries);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	HWFNC_DBG_INIT("%s: start_p=%u end_p=%u txq_idx_start:%u txq_idx_by_payload:%s\n",
+		desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start,
+		idx_by_payload ? "true" : "false");
+
+exit:
+	return ret;
+}
+
+static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
+	struct hw_fence_client_type_desc *desc)
+{
+	char name[31];
+	u32 tmp[4];
+	u32 queue_size;
+	int ret;
+
+	/* parse client queue properties from device-tree */
+	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
+	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4);
+	if (ret) {
+		HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name,
+			ret);
+		desc->queue_entries = drv_data->hw_fence_queue_entries;
+	} else {
+		desc->clients_num = tmp[0];
+		desc->queues_num = tmp[1];
+		desc->queue_entries = tmp[2];
+
+		if (tmp[3] > 1) {
+			HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%u\n", desc->name, tmp[3]);
+			return -EINVAL;
+		}
+		desc->skip_txq_wr_idx = tmp[3];
+	}
+
+	if (desc->clients_num > desc->max_clients_num || !desc->queues_num ||
+			desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) {
+		HWFNC_ERR("%s invalid dt: clients_num:%u queues_num:%u, queue_entries:%u\n",
+			desc->name, desc->clients_num, desc->queues_num, desc->queue_entries);
+		return -EINVAL;
+	}
+
+	/* parse extra client queue properties from device-tree */
+	ret = _parse_client_queue_dt_props_extra(drv_data, desc);
+	if (ret) {
+		HWFNC_ERR("%s failed to parse extra dt props\n", desc->name);
+		return -EINVAL;
+	}
+
+	/* compute mem_size */
+	if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
+		HWFNC_ERR("%s client queue entries:%u will overflow client queue size\n",
+			desc->name, desc->queue_entries);
+		return -EINVAL;
+	}
+
+	queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
+	if (queue_size >= ((U32_MAX & PAGE_MASK) -
+			(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
+			desc->start_padding + desc->end_padding)) / desc->queues_num) {
+		HWFNC_ERR("%s client queue_sz:%u start_p:%u end_p:%u will overflow mem size\n",
+			desc->name, queue_size, desc->start_padding, desc->end_padding);
+		return -EINVAL;
+	}
+
+	desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
+		(queue_size * desc->queues_num) + desc->start_padding + desc->end_padding);
+
+	if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) {
+		HWFNC_ERR("%s client queue mem_size:%u greater than max mem size:%d\n",
+			desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE);
+		return -EINVAL;
+	}
+
+	HWFNC_DBG_INIT("%s: clients=%u q_num=%u q_entries=%u mem_sz=%u skips_wr_ptr:%s\n",
+		desc->name, desc->clients_num, desc->queues_num, desc->queue_entries,
+		desc->mem_size, desc->skip_txq_wr_idx ? "true" : "false");
+
+	return 0;
+}
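
A self-contained worked example of the mem_size formula above; the header and payload sizes below are assumptions, not the driver's real constants:

#include <stdio.h>

#define PAGE_SIZE_ 4096u
#define PAGE_ALIGN_(x) (((x) + PAGE_SIZE_ - 1) & ~(PAGE_SIZE_ - 1))
#define PAYLOAD 64u                       /* assumed HW_FENCE_CLIENT_QUEUE_PAYLOAD */
#define HEADERS(q) (0x100u + (q) * 0x40u) /* assumed HW_FENCE_HFI_CLIENT_HEADERS_SIZE */

int main(void)
{
	unsigned int queues_num = 2, queue_entries = 128;
	unsigned int start_padding = 0, end_padding = 0;
	unsigned int queue_size = PAYLOAD * queue_entries; /* 8192 bytes per queue */
	unsigned int mem_size = PAGE_ALIGN_(HEADERS(queues_num) +
		queue_size * queues_num + start_padding + end_padding);

	printf("mem_size=0x%x\n", mem_size); /* 0x4180 rounded up to 0x5000 */
	return 0;
}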
+
+static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
+{
+	struct hw_fence_client_type_desc *desc;
+	int i, j, ret;
+	u32 start_offset;
+	size_t size;
+	int configurable_clients_num = 0;
+
+	drv_data->rxq_clients_num = HW_FENCE_MIN_RXQ_CLIENTS;
+	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
+		desc = &hw_fence_client_types[i];
+		ret = _parse_client_queue_dt_props_indv(drv_data, desc);
+		if (ret) {
+			HWFNC_ERR("failed to initialize %s client queue size properties\n",
+				desc->name);
+			return ret;
+		}
+
+		if (i >= HW_FENCE_MIN_RXQ_CLIENT_TYPE &&
+				desc->queues_num == HW_FENCE_CLIENT_QUEUES)
+			drv_data->rxq_clients_num += desc->clients_num;
+
+		if (i >= HW_FENCE_MAX_CLIENT_TYPE_STATIC)
+			configurable_clients_num += desc->clients_num;
+	}
+
+	/* store client type descriptors for configurable client indexing logic */
+	drv_data->hw_fence_client_types = hw_fence_client_types;
+
+	/* clients and size desc are allocated for all static clients regardless of device-tree */
+	drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num;
+
+	/* allocate memory for client queue size descriptors */
+	size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc);
+	drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL);
+	if (!drv_data->hw_fence_client_queue_size)
+		return -ENOMEM;
+
+	/* initialize client queue size desc for each client */
+	start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
+		HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num) +
+		drv_data->hw_fence_mem_fences_table_size);
+	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
+		desc = &hw_fence_client_types[i];
+		for (j = 0; j < desc->clients_num; j++) {
+			enum hw_fence_client_id client_id_ext = desc->init_id + j;
+			enum hw_fence_client_id client_id =
+				hw_fence_utils_get_client_id_priv(drv_data, client_id_ext);
+
+			drv_data->hw_fence_client_queue_size[client_id] =
+				(struct hw_fence_client_queue_desc){desc, start_offset};
+			HWFNC_DBG_INIT("%s client_id_ext:%d client_id:%d start_offset:%u\n",
+				desc->name, client_id_ext, client_id, start_offset);
+			start_offset += desc->mem_size;
+		}
+	}
+	drv_data->used_mem_size = start_offset;
+
+	return 0;
+}
+
+int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
+{
+	int ret;
+	size_t size;
+	u32 val = 0;
+
+	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val);
+	if (ret || !val) {
+		HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val);
+		return ret;
+	}
+	drv_data->hw_fence_table_entries = val;
+
+	if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) {
+		HWFNC_ERR("table entries:%u will overflow table size\n",
+			drv_data->hw_fence_table_entries);
+		return -EINVAL;
+	}
+	drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) *
+		drv_data->hw_fence_table_entries);
+
+	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val);
+	if (ret || !val) {
+		HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val);
+		return ret;
+	}
+	drv_data->hw_fence_queue_entries = val;
+
+	/* ctrl queues init */
+
+	if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) {
+		HWFNC_ERR("queue entries:%u will overflow ctrl queue size\n",
+			drv_data->hw_fence_queue_entries);
+		return -EINVAL;
+	}
+	drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD *
+		drv_data->hw_fence_queue_entries;
+
+	if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) /
+			HW_FENCE_CTRL_QUEUES) {
+		HWFNC_ERR("queue size:%u will overflow ctrl queue mem size\n",
+			drv_data->hw_fence_ctrl_queue_size);
+		return -EINVAL;
+	}
+	drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE +
+		(HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size);
+
+	/* clients queues init */
+
+	ret = _parse_client_queue_dt_props(drv_data);
+	if (ret) {
+		HWFNC_ERR("failed to parse client queue properties\n");
+		return -EINVAL;
+	}
+
+	/* allocate clients */
+
+	size = drv_data->clients_num * sizeof(struct msm_hw_fence_client *);
+	drv_data->clients = kzalloc(size, GFP_KERNEL);
+	if (!drv_data->clients)
+		return -ENOMEM;
+
+	HWFNC_DBG_INIT("table: entries=%u mem_size=%u queue: entries=%u\n",
+		drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size,
+		drv_data->hw_fence_queue_entries);
+	HWFNC_DBG_INIT("ctrl queue: size=%u mem_size=%u\n",
+		drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size);
+	HWFNC_DBG_INIT("clients_num: %u, total_mem_size:%u\n", drv_data->clients_num,
+		drv_data->used_mem_size);
+
+	return 0;
+}
+
+int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data)
+{
+	int ret;
+	u32 reg_config[2];
+	void __iomem *ptr;
+
+	/* Get ipcc memory range */
+	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg",
+				reg_config, 2);
+	if (ret) {
+		HWFNC_ERR("failed to read ipcc reg: %d\n", ret);
+		return ret;
+	}
+	drv_data->ipcc_reg_base = reg_config[0];
+	drv_data->ipcc_size = reg_config[1];
+
+	/* Mmap ipcc registers */
+	ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size);
+	if (!ptr) {
+		HWFNC_ERR("failed to ioremap ipcc regs\n");
+		return -ENOMEM;
+	}
+	drv_data->ipcc_io_mem = ptr;
+
+	HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n",
+		drv_data->ipcc_reg_base, drv_data->ipcc_size,
+		drv_data->ipcc_io_mem);
+
+	hw_fence_ipcc_enable_signaling(drv_data);
+
+	return ret;
+}
+
+int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data)
+{
+	int ret = 0;
+	unsigned int reg_config[2];
+	void __iomem *ptr;
+
+	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg",
+			reg_config, 2);
+	if (ret) {
+		HWFNC_ERR("failed to read qtimer reg: %d\n", ret);
+		return ret;
+	}
+
+	drv_data->qtime_reg_base = reg_config[0];
+	drv_data->qtime_size = reg_config[1];
+
+	ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size);
+	if (!ptr) {
+		HWFNC_ERR("failed to ioremap qtime regs\n");
+		return -ENOMEM;
+	}
+
+	drv_data->qtime_io_mem = ptr;
+
+	return ret;
+}
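
For reference, an illustrative device-tree node exercising the properties parsed by hw_fence_utils_parse_dt_props(), the two mapping helpers above, and _parse_client_queue_dt_props_indv(); all values are placeholders, not a shipping configuration:

/*
 *	qcom,hw-fence {
 *		qcom,hw-fence-table-entries = <8192>;
 *		qcom,hw-fence-queue-entries = <800>;
 *		// <clients_num queues_num queue_entries skip_txq_wr_idx>
 *		qcom,hw-fence-client-type-dpu = <6 2 128 0>;
 *		// <start_padding end_padding txq_idx_start txq_idx_by_payload>
 *		qcom,hw-fence-client-type-dpu-extra = <0 0 0 0>;
 *		qcom,ipcc-reg = <0x400000 0x100000>;
 *		qcom,qtime-reg = <0x17c65000 0x1000>;
 *	};
 */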
+
+enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
+	enum hw_fence_client_id client_id)
+{
+	int i, client_type, offset;
+	enum hw_fence_client_id client_id_priv;
+
+	if (client_id < HW_FENCE_MAX_STATIC_CLIENTS_INDEX)
+		return client_id;
+
+	/* consolidate external 'hw_fence_client_id' enum into consecutive internal client IDs */
+	client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC +
+		(client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) /
+		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;
+	offset = (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) %
+		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;
+
+	/* invalid client id out of range of supported configurable sub-clients */
+	if (offset >= drv_data->hw_fence_client_types[client_type].clients_num)
+		return HW_FENCE_CLIENT_MAX;
+
+	client_id_priv = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + offset;
+
+	for (i = HW_FENCE_MAX_CLIENT_TYPE_STATIC; i < client_type; i++)
+		client_id_priv += drv_data->hw_fence_client_types[i].clients_num;
+
+	return client_id_priv;
+}
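
A self-contained worked example of the ID consolidation above, simplified so the configurable types index from zero; the constants and clients_num values are assumptions, not the real configuration:

#include <stdio.h>

#define MAX_STATIC_CLIENTS_INDEX 14 /* assumed HW_FENCE_MAX_STATIC_CLIENTS_INDEX */
#define MAX_SIGNAL_PER_CLIENT    32 /* assumed MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT */

/* clients_num per configurable type, as if parsed from device-tree */
static const int clients_num[] = { 2, 1, 8 };

static int get_client_id_priv(int client_id)
{
	int type, offset, i, priv;

	if (client_id < MAX_STATIC_CLIENTS_INDEX)
		return client_id;

	type = (client_id - MAX_STATIC_CLIENTS_INDEX) / MAX_SIGNAL_PER_CLIENT;
	offset = (client_id - MAX_STATIC_CLIENTS_INDEX) % MAX_SIGNAL_PER_CLIENT;
	if (offset >= clients_num[type])
		return -1; /* out of range of configured sub-clients */

	priv = MAX_STATIC_CLIENTS_INDEX + offset;
	for (i = 0; i < type; i++)
		priv += clients_num[i];
	return priv;
}

int main(void)
{
	/* first sub-client of the second configurable type:
	 * ext id = 14 + 32, internal id = 14 + 2 + 0 = 16
	 */
	printf("%d\n", get_client_id_priv(14 + 32));
	return 0;
}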
+
+int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id)
+{
+	if (!drv_data || client_id >= drv_data->clients_num ||
+			!drv_data->hw_fence_client_queue_size[client_id].type) {
+		HWFNC_ERR("invalid access to client:%d queues_num\n", client_id);
+		return 0;
+	}
+
+	return drv_data->hw_fence_client_queue_size[client_id].type->queues_num;
+}

+ 629 - 0
qcom/opensource/mm-drivers/hw_fence/src/hw_fence_ioctl.c

@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+#include <linux/sync_file.h>
+
+#include "hw_fence_drv_priv.h"
+#include "hw_fence_drv_utils.h"
+#include "hw_fence_drv_ipc.h"
+#include "hw_fence_drv_debug.h"
+
+#define HW_SYNC_IOCTL_COUNT		ARRAY_SIZE(hw_sync_debugfs_ioctls)
+#define HW_FENCE_ARRAY_SIZE		10
+#define HW_SYNC_IOC_MAGIC		'W'
+#define HW_SYNC_IOC_REG_CLIENT	_IOWR(HW_SYNC_IOC_MAGIC, 10, unsigned long)
+#define HW_SYNC_IOC_UNREG_CLIENT	_IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long)
+#define HW_SYNC_IOC_CREATE_FENCE	_IOWR(HW_SYNC_IOC_MAGIC, 12,\
+						struct hw_fence_sync_create_data)
+#define HW_SYNC_IOC_CREATE_FENCE_ARRAY	_IOWR(HW_SYNC_IOC_MAGIC, 14,\
+						struct hw_fence_array_sync_create_data)
+#define HW_SYNC_IOC_REG_FOR_WAIT	_IOWR(HW_SYNC_IOC_MAGIC, 16, int)
+#define HW_SYNC_IOC_FENCE_SIGNAL	_IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long)
+#define HW_SYNC_IOC_FENCE_WAIT	_IOWR(HW_SYNC_IOC_MAGIC, 18, int)
+#define HW_SYNC_IOC_RESET_CLIENT	_IOWR(HW_SYNC_IOC_MAGIC, 19, unsigned long)
+#define HW_FENCE_IOCTL_NR(n)			(_IOC_NR(n) - 2)
+#define HW_IOCTL_DEF(ioctl, _func)	\
+	[HW_FENCE_IOCTL_NR(ioctl)] = {		\
+		.cmd = ioctl,			\
+		.func = _func,			\
+		.name = #ioctl			\
+	}
+
+#define ktime_compare_safe(A, B) ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+/**
+ * struct hw_sync_obj - per client hw sync object.
+ * @context: context id used to create fences.
+ * @client_id: to uniquely represent client.
+ * @client_handle: Pointer to the structure holding the resources
+ *                 allocated to the client.
+ * @mem_descriptor: Memory descriptor of the queue allocated by the
+ *                  hardware fence driver for each client during register.
+ */
+struct hw_sync_obj {
+	u64 context;
+	int client_id;
+	void *client_handle;
+	struct msm_hw_fence_mem_addr mem_descriptor;
+};
+
+/**
+ * struct hw_fence_sync_create_data - data used in creating fences.
+ * @seqno: sequence number.
+ * @incr_context: if set, then the context would be incremented.
+ * @fence: returns the fd of the new sync_file with the created fence.
+ * @hash: fence hash
+ */
+struct hw_fence_sync_create_data {
+	u64 seqno;
+	bool incr_context;
+	__s32 fence;
+	u64 hash;
+};
+
+/**
+ * struct hw_fence_array_sync_create_data - data used in creating multiple fences.
+ * @seqno: sequence number used to create fence array.
+ * @num_fences: number of fence fds received.
+ * @fences: array of fence fds.
+ * @fence_array_fd: fd of fence array.
+ */
+struct hw_fence_array_sync_create_data {
+	u64 seqno;
+	int num_fences;
+	u64 fences[HW_FENCE_ARRAY_SIZE];
+	__s32 fence_array_fd;
+};
+
+/**
+ * struct hw_fence_sync_signal_data - data used to signal fences.
+ * @hash: hash of the fence.
+ * @error_flag: error flag
+ */
+struct hw_fence_sync_signal_data {
+	u64 hash;
+	u32 error_flag;
+};
+
+/**
+ * struct hw_fence_sync_wait_data - data used to wait on fences.
+ * @fence: fence fd.
+ * @timeout_ms: fence wait time out.
+ */
+struct hw_fence_sync_wait_data {
+	__s32 fence;
+	u64 timeout_ms;
+};
+
+/**
+ * struct hw_fence_sync_reset_data - data used to reset client.
+ * @client_id: client id.
+ * @reset_flag: reset flag
+ */
+struct hw_fence_sync_reset_data {
+	int client_id;
+	u32 reset_flag;
+};
+
+typedef long hw_fence_ioctl_t(struct hw_sync_obj *obj, unsigned long arg);
+
+/**
+ * struct hw_sync_ioctl_def - hw_sync driver ioctl entry
+ * @cmd: ioctl command number, without flags
+ * @func: handler for this ioctl
+ * @name: user-readable name for debug output
+ */
+struct hw_sync_ioctl_def {
+	unsigned int cmd;
+	hw_fence_ioctl_t *func;
+	const char *name;
+};
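
The table that HW_SYNC_IOCTL_COUNT measures is defined later in this file (not shown here); presumably it is assembled from the HW_IOCTL_DEF() helper above along these lines:

static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = {
	HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client),
	HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client),
	HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence),
	/* ... one HW_IOCTL_DEF() entry per HW_SYNC_IOC_* command above ... */
};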
+
+static bool _is_valid_client(struct hw_sync_obj *obj)
+{
+	if (!obj)
+		return false;
+
+	if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id > HW_FENCE_CLIENT_ID_VAL6) {
+		HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id,
+				HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
+		return false;
+	}
+
+	return true;
+}
+
+static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg)
+{
+	int client_id;
+
+	if (copy_from_user(&client_id, (void __user *)arg, sizeof(client_id)))
+		return -EFAULT;
+
+	if (!obj)
+		return -EINVAL;
+
+	if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) {
+		HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id,
+				HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
+		return -EINVAL;
+	}
+
+	return client_id;
+}
+
+static void *_hw_sync_get_fence(int fd)
+{
+	return fd >= 0 ? sync_file_get_fence(fd) : NULL;
+}
+
+static int hw_sync_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct hw_sync_obj *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->context = dma_fence_context_alloc(1);
+	file->private_data = obj;
+
+	return 0;
+}
+
+static int hw_sync_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct hw_sync_obj *obj = file->private_data;
+
+	if (!obj)
+		return -EINVAL;
+
+	kfree(obj);
+
+	return 0;
+}
+
+static long hw_sync_ioctl_reg_client(struct hw_sync_obj *obj, unsigned long arg)
+{
+	int client_id = _get_client_id(obj, arg);
+
+	if (client_id < 0) {
+		return client_id;
+	} else if (obj->client_handle) {
+		HWFNC_ERR("client:%d already registered as validation client\n", client_id);
+		return -EINVAL;
+	}
+
+	obj->client_id = client_id;
+	obj->client_handle = msm_hw_fence_register(obj->client_id, &obj->mem_descriptor);
+	if (IS_ERR_OR_NULL(obj->client_handle))
+		return -EINVAL;
+
+	return 0;
+}
+
+static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long arg)
+{
+	int client_id = _get_client_id(obj, arg);
+
+	if (client_id < 0) {
+		return client_id;
+	} else if (client_id != obj->client_id) {
+		HWFNC_ERR("deregistering hw-fence client %d with invalid client_id arg:%d\n",
+			obj->client_id, client_id);
+		return -EINVAL;
+	}
+
+	return msm_hw_fence_deregister(obj->client_handle);
+}
+
+static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long arg)
+{
+	struct msm_hw_fence_create_params params;
+	struct hw_fence_sync_create_data data;
+	struct hw_dma_fence *fence;
+	spinlock_t *fence_lock;
+	u64 hash;
+	struct sync_file *sync_file;
+	int fd, ret;
+
+	if (!_is_valid_client(obj)) {
+		return -EINVAL;
+	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
+		HWFNC_ERR("client:%d is not registered as a validation client\n", obj->client_id);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	/* create dma fence */
+	fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
+	if (!fence_lock)
+		return -ENOMEM;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence) {
+		kfree(fence_lock);
+		return -ENOMEM;
+	}
+
+	snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%llu:seqno:%llu",
+			obj->client_id, obj->context, data.seqno);
+
+	spin_lock_init(fence_lock);
+	dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, obj->context, data.seqno);
+
+	HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", obj->client_id,
+				obj->context, data.seqno);
+	params.fence = &fence->base;
+	params.handle = &hash;
+
+	/* create hw fence */
+	ret = msm_hw_fence_create(obj->client_handle, &params);
+	if (ret) {
+		HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n",
+			obj->client_id, obj->context, data.seqno);
+		dma_fence_put(&fence->base);
+		return -EINVAL;
+	}
+
+	/* keep handle in dma_fence, to destroy hw-fence during release */
+	fence->client_handle = obj->client_handle;
+
+	if (data.incr_context)
+		obj->context = dma_fence_context_alloc(1);
+
+	/* create fd */
+	fd = get_unused_fd_flags(0);
+	if (fd < 0) {
+		HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
+		dma_fence_put(&fence->base);
+		return fd;
+	}
+
+	sync_file = sync_file_create(&fence->base);
+	if (sync_file == NULL) {
+		HWFNC_ERR("couldn't create fence fd, %d\n", fd);
+		dma_fence_put(&fence->base);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Decrement the refcount that sync_file_create increments */
+	dma_fence_put(&fence->base);
+
+	data.fence = fd;
+	data.hash = hash;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		/* fput() drops the sync_file and with it the last fence reference */
+		fput(sync_file->file);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	fd_install(fd, sync_file->file);
+
+	return 0;
+
+exit:
+	put_unused_fd(fd);
+	return ret;
+}
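
A hypothetical userspace caller of the ioctl above; it assumes the HW_SYNC_IOC_* numbers and struct hw_fence_sync_create_data are shared through a UAPI header, and the debugfs node path is a guess:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "hw_sync_uapi.h" /* hypothetical header carrying the definitions above */

int main(void)
{
	struct hw_fence_sync_create_data data = { .seqno = 1, .incr_context = 0 };
	int dev = open("/sys/kernel/debug/hw_sync", O_RDWR); /* assumed node path */

	if (dev < 0)
		return 1;
	if (ioctl(dev, HW_SYNC_IOC_CREATE_FENCE, &data) == 0)
		printf("fence fd:%d hash:%llu\n", data.fence,
			(unsigned long long)data.hash);
	close(dev);
	return 0;
}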
+
+static void _put_child_fences(int i, struct dma_fence **fences)
+{
+	int fence_idx;
+
+	for (fence_idx = i; fence_idx >= 0; fence_idx--)
+		dma_fence_put(fences[fence_idx]);
+}
+
+static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg)
+{
+	struct dma_fence_array *fence_array;
+	struct hw_fence_array_sync_create_data data;
+	struct dma_fence **fences = NULL;
+	struct sync_file *sync_file;
+	int num_fences, i, fd, ret;
+	struct hw_dma_fence *fence;
+
+	if (!_is_valid_client(obj)) {
+		return -EINVAL;
+	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
+		HWFNC_ERR("client:%d is not registered as a validation client\n", obj->client_id);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	num_fences = data.num_fences;
+	if (num_fences > HW_FENCE_ARRAY_SIZE) {
+		HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n",
+					num_fences, HW_FENCE_ARRAY_SIZE);
+		return -EINVAL;
+	}
+
+	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+	if (!fences) {
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_fences; i++) {
+		fd = data.fences[i];
+		if (fd < 0) {
+			kfree(fences);
+			return -EINVAL;
+		}
+		fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd);
+		if (!fence) {
+			_put_child_fences(i-1, fences);
+			kfree(fences);
+			return -EINVAL;
+		}
+		fences[i] = &fence->base;
+	}
+
+	/* create the fence array from array of dma fences */
+	fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno, 0);
+	if (!fence_array) {
+		HWFNC_ERR("Error creating fence_array\n");
+		/* decrease the refcount incremented for each child fences */
+		for (i = 0; i < num_fences; i++)
+			dma_fence_put(fences[i]);
+		kfree(fences);
+		return -EINVAL;
+	}
+
+	/* create fd */
+	fd = get_unused_fd_flags(0);
+	if (fd < 0) {
+		HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
+		dma_fence_put(&fence_array->base);
+		return fd;
+	}
+
+	sync_file = sync_file_create(&fence_array->base);
+	if (sync_file == NULL) {
+		HWFNC_ERR("couldn't create fence fd, %d\n", fd);
+		/* this put drops the last reference and frees the array */
+		dma_fence_put(&fence_array->base);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Decrement the refcount that sync_file_create increments */
+	dma_fence_put(&fence_array->base);
+
+	data.fence_array_fd = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		/* fput drops the sync_file, which releases its array reference */
+		fput(sync_file->file);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	fd_install(fd, sync_file->file);
+
+	return 0;
+
+exit:
+	put_unused_fd(fd);
+	return ret;
+}
+
+/*
+ * this IOCTL only supports receiving one fence as input parameter, which can
+ * be either a "dma_fence" or a "dma_fence_array"; eventually this API may be
+ * expanded to receive more fences
+ */
+static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long arg)
+{
+	struct dma_fence *fence;
+	int ret, fd, num_fences = 1;
+
+	if (!_is_valid_client(obj))
+		return -EINVAL;
+
+	if (copy_from_user(&fd, (void __user *)arg, sizeof(fd)))
+		return -EFAULT;
+
+	fence = (struct dma_fence *)_hw_sync_get_fence(fd);
+	if (!fence) {
+		HWFNC_ERR("Invalid fence fd: %d\n", fd);
+		return -EINVAL;
+	}
+
+	ret = msm_hw_fence_wait_update_v2(obj->client_handle, &fence, NULL, NULL, num_fences, 1);
+
+	/* Decrement the refcount that hw_sync_get_fence increments */
+	dma_fence_put(fence);
+
+	return ret;
+}
+
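The matching userspace call for the wait-registration path reduces to a single ioctl, since the handler below copies one int fd from user memory; a hedged sketch, using the same assumed device fd as in the create-fence sketch:

```c
#include <sys/ioctl.h>
#include <sync_fence/qcom_sync_file.h>	/* HW_SYNC_IOC_REG_FOR_WAIT */

/* the handler copies an int fd from user memory, so pass its address */
int example_register_for_wait(int dev_fd, int fence_fd)
{
	return ioctl(dev_fd, HW_SYNC_IOC_REG_FOR_WAIT, &fence_fd);
}
```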
+static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct hw_fence_sync_signal_data data;
+	int ret, tx_client, rx_client, signal_id;
+
+	if (!_is_valid_client(obj)) {
+		return -EINVAL;
+	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
+		HWFNC_ERR("invalid client handle for the client_id: %d\n", obj->client_id);
+		return -EINVAL;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle;
+	if (!hw_fence_client) {
+		HWFNC_ERR("invalid client handle\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	ret = msm_hw_fence_update_txq(obj->client_handle, data.hash, 0, data.error_flag);
+	if (ret) {
+		HWFNC_ERR("hw fence update txq has failed client_id: %d\n", obj->client_id);
+		return ret;
+	}
+
+	signal_id = dbg_out_clients_signal_map_no_dpu[obj->client_id].ipc_signal_id;
+	if (signal_id < 0)
+		return -EINVAL;
+
+	tx_client = hw_fence_client->ipc_client_pid;
+	rx_client = hw_fence_client->ipc_client_vid;
+	ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id);
+	if (ret) {
+		HWFNC_ERR("hw fence trigger signal has failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct msm_hw_fence_queue_payload payload;
+	struct hw_fence_sync_wait_data data;
+	struct dma_fence *fence;
+	ktime_t cur_ktime, exp_ktime;
+	int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1;  /* rx queue index */
+
+	if (!_is_valid_client(obj))
+		return -EINVAL;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	fd = data.fence;
+	fence = (struct dma_fence *)_hw_sync_get_fence(fd);
+	if (!fence) {
+		HWFNC_ERR("Invalid fence fd: %d\n", fd);
+		return -EINVAL;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle;
+	if (!hw_fence_client) {
+		HWFNC_ERR("invalid client handle for fd:%d\n", fd);
+		/* Decrement the refcount that hw_sync_get_fence increments */
+		dma_fence_put(fence);
+		return -EINVAL;
+	}
+
+	exp_ktime = ktime_add_ms(ktime_get(), data.timeout_ms);
+	do {
+		ret = wait_event_timeout(hw_fence_client->wait_queue,
+				atomic_read(&hw_fence_client->val_signal) > 0,
+				msecs_to_jiffies(data.timeout_ms));
+		cur_ktime = ktime_get();
+	} while ((atomic_read(&hw_fence_client->val_signal) <= 0) && (ret == 0) &&
+		ktime_compare_safe(exp_ktime, cur_ktime) > 0);
+
+	if (!ret) {
+		HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms);
+		/* Decrement the refcount that hw_sync_get_fence increments */
+		dma_fence_put(fence);
+		return -ETIMEDOUT;
+	}
+
+	/* clear doorbell signal flag */
+	atomic_set(&hw_fence_client->val_signal, 0);
+
+	while (read) {
+		read = hw_fence_read_queue(obj->client_handle, &payload, queue_type);
+		if (read < 0) {
+			HWFNC_ERR("unable to read client rxq client_id:%d\n", obj->client_id);
+			break;
+		}
+		HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%lu\n",
+			payload.hash, payload.flags, payload.error);
+		if (payload.ctxt_id == fence->context && payload.seqno == fence->seqno) {
+			/* Decrement the refcount that hw_sync_get_fence increments */
+			dma_fence_put(fence);
+			return 0;
+		}
+	}
+
+	HWFNC_ERR("fence received did not match the fence expected\n");
+	HWFNC_ERR("fence received: context:%llu seqno:%llu fence expected: context:%llu seqno:%llu\n",
+				payload.ctxt_id, payload.seqno, fence->context, fence->seqno);
+
+	/* Decrement the refcount that hw_sync_get_fence increments; done after
+	 * logging so the fence fields are not read after the final put
+	 */
+	dma_fence_put(fence);
+
+	return read;
+}
+
+static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg)
+{
+	int ret;
+	struct hw_fence_sync_reset_data data;
+
+	if (!_is_valid_client(obj)) {
+		return -EINVAL;
+	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
+		HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	ret = msm_hw_fence_reset_client(obj->client_handle, data.reset_flag);
+	if (ret) {
+		HWFNC_ERR("hw fence reset client has failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = {
+	HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client),
+	HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client),
+	HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence),
+	HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array),
+	HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait),
+	HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal),
+	HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait),
+	HW_IOCTL_DEF(HW_SYNC_IOC_RESET_CLIENT, hw_sync_ioctl_reset_client)
+};
+
+static long hw_sync_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct hw_sync_obj *obj = file->private_data;
+	int num = HW_FENCE_IOCTL_NR(cmd);
+	hw_fence_ioctl_t *func;
+
+	if (num >= HW_SYNC_IOCTL_COUNT) {
+		HWFNC_ERR("invalid ioctl num = %d\n", num);
+		return -EINVAL;
+	}
+
+	func = hw_sync_debugfs_ioctls[num].func;
+	if (unlikely(!func)) {
+		HWFNC_ERR("no function num = %d\n", num);
+		return -ENOTTY;
+	}
+
+	return func(obj, arg);
+}
+
+const struct file_operations hw_sync_debugfs_fops = {
+	.open           = hw_sync_debugfs_open,
+	.release        = hw_sync_debugfs_release,
+	.unlocked_ioctl = hw_sync_debugfs_ioctl,
+};

+ 807 - 0
qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence.c

@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "hw_fence_drv_priv.h"
+#include "hw_fence_drv_utils.h"
+#include "hw_fence_drv_debug.h"
+#include "hw_fence_drv_ipc.h"
+
+struct hw_fence_driver_data *hw_fence_drv_data;
+bool hw_fence_driver_enable;
+
+void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
+	struct msm_hw_fence_mem_addr *mem_descriptor)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	enum hw_fence_client_id client_id;
+	int ret;
+
+	if (!hw_fence_driver_enable)
+		return ERR_PTR(-ENODEV);
+
+	HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext);
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
+		HWFNC_ERR("hw fence driver not ready\n");
+		return ERR_PTR(-EAGAIN);
+	}
+
+	if (!mem_descriptor || client_id_ext >= HW_FENCE_CLIENT_MAX) {
+		HWFNC_ERR("Invalid params: %d client_id_ext:%d\n",
+			!mem_descriptor, client_id_ext);
+		return ERR_PTR(-EINVAL);
+	}
+
+	client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
+	if (client_id >= HW_FENCE_CLIENT_MAX) {
+		HWFNC_ERR("Invalid params: client_id:%d client_id_ext:%d\n",
+			client_id, client_id_ext);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Alloc client handle */
+	hw_fence_client =  kzalloc(sizeof(*hw_fence_client), GFP_KERNEL);
+	if (!hw_fence_client)
+		return ERR_PTR(-ENOMEM);
+
+	/* Avoid race condition if multiple-threads request same client at same time */
+	mutex_lock(&hw_fence_drv_data->clients_register_lock);
+	if (hw_fence_drv_data->clients[client_id]) {
+		HWFNC_ERR("client with id %d already registered\n", client_id);
+		mutex_unlock(&hw_fence_drv_data->clients_register_lock);
+		kfree(hw_fence_client);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Mark client as registered */
+	hw_fence_drv_data->clients[client_id] = hw_fence_client;
+	mutex_unlock(&hw_fence_drv_data->clients_register_lock);
+
+	hw_fence_client->client_id = client_id;
+	hw_fence_client->client_id_ext = client_id_ext;
+	hw_fence_client->ipc_client_vid =
+		hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id);
+	hw_fence_client->ipc_client_pid =
+		hw_fence_ipcc_get_client_phys_id(hw_fence_drv_data, client_id);
+
+	if (hw_fence_client->ipc_client_vid <= 0 || hw_fence_client->ipc_client_pid <= 0) {
+		HWFNC_ERR("Failed to find client:%d ipc vid:%d pid:%d\n", client_id,
+			hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	hw_fence_client->ipc_signal_id = hw_fence_ipcc_get_signal_id(hw_fence_drv_data, client_id);
+	if (hw_fence_client->ipc_signal_id < 0) {
+		HWFNC_ERR("Failed to find client:%d signal\n", client_id);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id);
+	hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
+
+	hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id);
+	if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq &&
+			hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) {
+		HWFNC_ERR("client:%d invalid q_num:%lu for updates_rxq:%s\n", client_id,
+			hw_fence_client->queues_num,
+			hw_fence_client->update_rxq ? "true" : "false");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/* Alloc Client HFI Headers and Queues */
+	ret = hw_fence_alloc_client_resources(hw_fence_drv_data,
+		hw_fence_client, mem_descriptor);
+	if (ret)
+		goto error;
+
+	/* Initialize signal for communication with FenceCTL */
+	ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client);
+	if (ret)
+		goto error;
+
+	/*
+	 * Update Fence Controller with the address of the Queues and
+	 * the Fences Tables for this client
+	 */
+	ret = hw_fence_init_controller_resources(hw_fence_client);
+	if (ret)
+		goto error;
+
+	mutex_init(&hw_fence_client->error_cb_lock);
+
+	HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n",
+		hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num,
+		hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid,
+		hw_fence_client->ipc_client_pid);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	init_waitqueue_head(&hw_fence_client->wait_queue);
+#endif /* CONFIG_DEBUG_FS */
+
+	return (void *)hw_fence_client;
+error:
+
+	/* Free all the allocated resources */
+	hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client);
+
+	HWFNC_ERR("failed with error:%d\n", ret);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(msm_hw_fence_register);
+
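Taken together with the create/destroy entry points defined below, a minimal in-kernel client sketch looks as follows; the client id choice is illustrative and error handling is trimmed:

```c
#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/soc/qcom/msm_hw_fence.h>

static int example_hw_fence_client(struct dma_fence *fence)
{
	struct msm_hw_fence_mem_addr mem_desc;	/* filled in by register() */
	struct msm_hw_fence_create_params params = { 0 };
	void *client;
	u64 handle;
	int ret;

	/* one-time registration; returns an opaque client handle */
	client = msm_hw_fence_register(HW_FENCE_CLIENT_ID_CTX0, &mem_desc);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);

	/* back the dma-fence with an entry in the global fence table */
	params.fence = fence;
	params.handle = &handle;
	ret = msm_hw_fence_create(client, &params);
	if (!ret)
		ret = msm_hw_fence_destroy(client, fence);

	msm_hw_fence_deregister(client);
	return ret;
}
```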
+int msm_hw_fence_deregister(void *client_handle)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+
+	if (IS_ERR_OR_NULL(client_handle)) {
+		HWFNC_ERR("Invalid client handle\n");
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
+		HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
+		return -EINVAL;
+	}
+
+	HWFNC_DBG_H("+\n");
+
+	/* Free all the allocated resources */
+	hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client);
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_deregister);
+
+int msm_hw_fence_create(void *client_handle,
+	struct msm_hw_fence_create_params *params)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct dma_fence_array *array;
+	struct dma_fence *fence;
+	int ret;
+
+	if (IS_ERR_OR_NULL(client_handle) || !params || !params->fence || !params->handle) {
+		HWFNC_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!hw_fence_drv_data->vm_ready) {
+		HWFNC_DBG_H("VM not ready, cannot create fence\n");
+		return -EAGAIN;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+	fence = (struct dma_fence *)params->fence;
+
+	HWFNC_DBG_H("+\n");
+
+	/* Block any Fence-Array, we should only get individual fences */
+	array = to_dma_fence_array(fence);
+	if (array) {
+		HWFNC_ERR("HW Fence must be created for individual fences\n");
+		return -EINVAL;
+	}
+
+	/* This Fence is already a HW-Fence */
+	if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
+		HWFNC_ERR("DMA Fence already has HW Fence Flag set\n");
+		return -EINVAL;
+	}
+
+	/* Create the HW Fence, i.e. add entry in the Global Table for this Fence */
+	ret = hw_fence_create(hw_fence_drv_data, hw_fence_client,
+		  fence->context, fence->seqno, params->handle);
+	if (ret) {
+		HWFNC_ERR("Error creating HW fence\n");
+		return ret;
+	}
+
+	/* If no error, set the HW Fence Flag in the dma-fence */
+	set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_create);
+
+int msm_hw_fence_destroy(void *client_handle,
+	struct dma_fence *fence)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct dma_fence_array *array;
+	int ret;
+
+	if (IS_ERR_OR_NULL(client_handle) || !fence) {
+		HWFNC_ERR("Invalid data\n");
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	HWFNC_DBG_H("+\n");
+
+	/* Block any Fence-Array, we should only get individual fences */
+	array = to_dma_fence_array(fence);
+	if (array) {
+		HWFNC_ERR("HW Fence must be destroy for individual fences\n");
+		return -EINVAL;
+	}
+
+	/* This Fence is not a HW-Fence */
+	if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
+		HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%llx\n", fence->flags);
+		return -EINVAL;
+	}
+
+	/* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */
+	ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client,
+		fence->context, fence->seqno);
+	if (ret) {
+		HWFNC_ERR("Error destroying the HW fence\n");
+		return ret;
+	}
+
+	/* Clear the HW Fence Flag in the dma-fence */
+	clear_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_destroy);
+
+int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	int ret;
+
+	if (IS_ERR_OR_NULL(client_handle)) {
+		HWFNC_ERR("Invalid data\n");
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
+		HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
+		return -EINVAL;
+	}
+
+	HWFNC_DBG_H("+\n");
+
+	/* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */
+	ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, handle);
+	if (ret) {
+		HWFNC_ERR("Error destroying the HW fence handle:%llu client_id:%d\n", handle,
+			hw_fence_client->client_id);
+		return ret;
+	}
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_destroy_with_handle);
+
+int msm_hw_fence_wait_update_v2(void *client_handle,
+	struct dma_fence **fence_list, u64 *handles, u64 *client_data_list, u32 num_fences,
+	bool create)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct dma_fence_array *array;
+	int i, ret = 0;
+	enum hw_fence_client_data_id data_id;
+
+	if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) {
+		HWFNC_ERR("Invalid data\n");
+		return -EINVAL;
+	}
+
+	if (!hw_fence_drv_data->vm_ready) {
+		HWFNC_DBG_H("VM not ready, cannot destroy fence\n");
+		return -EAGAIN;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+	data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
+	if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
+		HWFNC_ERR("Populating non-NULL client_data_list with invalid client_id_ext:%d\n",
+			hw_fence_client->client_id_ext);
+		return -EINVAL;
+	}
+
+	if (hw_fence_client->client_id > hw_fence_drv_data->rxq_clients_num) {
+		HWFNC_ERR("Transmit-only client client_id:%d client_id_ext:%d register for wait\n",
+			hw_fence_client->client_id, hw_fence_client->client_id_ext);
+		return -EINVAL;
+	}
+
+	HWFNC_DBG_H("+\n");
+
+	/* Process all the list of fences */
+	for (i = 0; i < num_fences; i++) {
+		struct dma_fence *fence = fence_list[i];
+		u64 hash, client_data = 0;
+
+		if (client_data_list)
+			client_data = client_data_list[i];
+
+		/* Process a Fence-Array */
+		array = to_dma_fence_array(fence);
+		if (array) {
+			ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client,
+				array, &hash, client_data);
+			if (ret) {
+				HWFNC_ERR("Failed to process FenceArray\n");
+				return ret;
+			}
+		} else {
+			/* Process individual Fence */
+			ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence,
+				&hash, client_data);
+			if (ret) {
+				HWFNC_ERR("Failed to process Fence\n");
+				return ret;
+			}
+		}
+
+		if (handles)
+			handles[i] = hash;
+	}
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_wait_update_v2);
+
+int msm_hw_fence_wait_update(void *client_handle,
+	struct dma_fence **fence_list, u32 num_fences, bool create)
+{
+	return msm_hw_fence_wait_update_v2(client_handle, fence_list, NULL, NULL, num_fences,
+		create);
+}
+EXPORT_SYMBOL(msm_hw_fence_wait_update);
+
+int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct msm_hw_fence *hw_fences_tbl;
+	int i;
+
+	if (IS_ERR_OR_NULL(client_handle)) {
+		HWFNC_ERR("Invalid client handle!\n");
+		return -EINVAL;
+	}
+
+	if (!hw_fence_drv_data->vm_ready) {
+		HWFNC_DBG_H("VM not ready, cannot reset client\n");
+		return -EAGAIN;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+	hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl;
+
+	HWFNC_DBG_L("reset fences and queues for client:%d\n", hw_fence_client->client_id);
+	for (i = 0; i < hw_fence_drv_data->hw_fences_tbl_cnt; i++)
+		hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client,
+			&hw_fences_tbl[i], i, reset_flags);
+
+	hw_fence_utils_reset_queues(hw_fence_drv_data, hw_fence_client);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_reset_client);
+
+int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags)
+{
+	enum hw_fence_client_id client_id;
+
+	if (client_id_ext >= HW_FENCE_CLIENT_MAX) {
+		HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext);
+		return -EINVAL;
+	}
+
+	client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
+
+	if (client_id >= HW_FENCE_CLIENT_MAX) {
+		HWFNC_ERR("Invalid client_id:%d client_id_ext:%d\n", client_id, client_id_ext);
+		return -EINVAL;
+	}
+
+	return msm_hw_fence_reset_client(hw_fence_drv_data->clients[client_id],
+		reset_flags);
+}
+EXPORT_SYMBOL(msm_hw_fence_reset_client_by_id);
+
+int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready ||
+			!hw_fence_drv_data->vm_ready) {
+		HWFNC_ERR("hw fence driver  or vm not ready\n");
+		return -EAGAIN;
+	} else if (IS_ERR_OR_NULL(client_handle) ||
+			(handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) {
+		HWFNC_ERR("Invalid handle:%d or client handle:%d max:%d\n", handle,
+			IS_ERR_OR_NULL(client_handle), hw_fence_drv_data->hw_fences_tbl_cnt);
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	/* Write to Tx queue */
+	hw_fence_update_queue(hw_fence_drv_data, hw_fence_client,
+		hw_fence_drv_data->hw_fences_tbl[handle].ctx_id,
+		hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle,
+		flags, 0, error, HW_FENCE_TX_QUEUE - 1);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_update_txq);
+
+
+int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready ||
+			!hw_fence_drv_data->vm_ready) {
+		HWFNC_ERR("hw fence driver or vm not ready\n");
+		return -EAGAIN;
+	} else if (IS_ERR_OR_NULL(client_handle) ||
+			(handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) {
+		HWFNC_ERR("Invalid client_handle:0x%pK or fence handle:%d max:%d or error:%d\n",
+			client_handle, handle, hw_fence_drv_data->hw_fences_tbl_cnt, error);
+		return -EINVAL;
+	} else if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) {
+		HWFNC_ERR("invalid flags:0x%x expected:0x%x no support of in-place error update\n",
+			update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE);
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	/* Write to Tx queue */
+	hw_fence_update_existing_txq_payload(hw_fence_drv_data, hw_fence_client,
+		handle, error);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_update_txq_error);
+
+/* tx client must be the physical id; rx client the virtual id */
+int msm_hw_fence_trigger_signal(void *client_handle,
+	u32 tx_client_pid, u32 rx_client_vid,
+	u32 signal_id)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready
+			|| !hw_fence_drv_data->vm_ready) {
+		HWFNC_ERR("hw fence driver or vm not ready\n");
+		return -EAGAIN;
+	} else if (IS_ERR_OR_NULL(client_handle)) {
+		HWFNC_ERR("Invalid client\n");
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id);
+	hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_pid,
+		rx_client_vid, signal_id);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_trigger_signal);
+
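The validation ioctl earlier in this patch signals a fence by pairing these two exports; a sketch of that producer-side flow, where the IPC ids come from the client's mapping as in hw_sync_ioctl_fence_signal:

```c
/* sketch of the producer-side signal flow used by the validation ioctl */
static int example_signal_fence(void *client, u64 handle,
		u32 tx_pid, u32 rx_vid, u32 signal_id)
{
	int ret;

	/* queue the fence-signal payload in this client's Tx queue */
	ret = msm_hw_fence_update_txq(client, handle, 0, 0);
	if (ret)
		return ret;

	/* kick IPCC so the fence controller consumes the queue entry */
	return msm_hw_fence_trigger_signal(client, tx_pid, rx_vid, signal_id);
}
```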
+int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
+		HWFNC_ERR("hw fence driver not ready\n");
+		return -EAGAIN;
+	} else if (IS_ERR_OR_NULL(client_handle) || IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) {
+		HWFNC_ERR("Invalid params client:0x%pK cb_func:0x%pK data:0x%pK\n", client_handle,
+			cb, data);
+		return -EINVAL;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+	if (hw_fence_client->fence_error_cb) {
+		HWFNC_ERR("client_id:%d client_id_ext:%d already registered cb_func:%pK data:%pK\n",
+			hw_fence_client->client_id, hw_fence_client->client_id_ext,
+			hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata);
+		return -EINVAL;
+	}
+
+	hw_fence_client->fence_error_cb_userdata = data;
+	hw_fence_client->fence_error_cb = cb;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_register_error_cb);
+
+int msm_hw_fence_deregister_error_cb(void *client_handle)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
+		HWFNC_ERR("hw fence driver not ready\n");
+		return -EAGAIN;
+	} else if (IS_ERR_OR_NULL(client_handle)) {
+		HWFNC_ERR("Invalid client: 0x%pK\n", client_handle);
+		return -EINVAL;
+	}
+
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+	if (!mutex_trylock(&hw_fence_client->error_cb_lock)) {
+		HWFNC_ERR("client_id:%d is modifying or using fence_error_cb:0x%pK data:0x%pK\n",
+			hw_fence_client->client_id, hw_fence_client->fence_error_cb,
+			hw_fence_client->fence_error_cb_userdata);
+		return -EAGAIN;
+	}
+
+	if (!hw_fence_client->fence_error_cb) {
+		HWFNC_ERR("client_id:%d client_id_ext:%d did not register cb:%pK data:%pK\n",
+			hw_fence_client->client_id, hw_fence_client->client_id_ext,
+			hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	hw_fence_client->fence_error_cb = NULL;
+	hw_fence_client->fence_error_cb_userdata = NULL;
+
+exit:
+	mutex_unlock(&hw_fence_client->error_cb_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_hw_fence_deregister_error_cb);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	int client_id;
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
+		HWFNC_ERR("hw fence driver not ready\n");
+		return -EAGAIN;
+	} else if (IS_ERR_OR_NULL(client_handle)) {
+		HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle));
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) {
+		hw_fence_debug_dump_queues(HW_FENCE_PRINTK, hw_fence_client);
+
+		if (dump_clients_mask)
+			for (client_id = 0; client_id < HW_FENCE_CLIENT_MAX; client_id++)
+				if ((dump_clients_mask & (1 << client_id)) &&
+						hw_fence_drv_data->clients[client_id])
+					hw_fence_debug_dump_queues(HW_FENCE_PRINTK,
+						hw_fence_drv_data->clients[client_id]);
+	}
+
+	if (dump_flags & MSM_HW_FENCE_DBG_DUMP_TABLE)
+		hw_fence_debug_dump_table(HW_FENCE_PRINTK, hw_fence_drv_data);
+
+	if (dump_flags & MSM_HW_FENCE_DBG_DUMP_EVENTS)
+		hw_fence_debug_dump_events(HW_FENCE_PRINTK, hw_fence_drv_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_dump_debug_data);
+
+int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence)
+{
+	struct msm_hw_fence_client *hw_fence_client;
+	struct msm_hw_fence *hw_fence;
+	u64 hash;
+
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
+		HWFNC_ERR("hw fence driver not ready\n");
+		return -EAGAIN;
+	} else if (IS_ERR_OR_NULL(client_handle)) {
+		HWFNC_ERR("Invalid client handle:%d\n", IS_ERR_OR_NULL(client_handle));
+		return -EINVAL;
+	} else if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
+		HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%llx\n",
+			fence->context, fence->seqno, fence->flags);
+		return -EINVAL;
+	}
+	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
+
+	hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, fence->context,
+		fence->seqno, &hash);
+	if (!hw_fence) {
+		HWFNC_ERR("failed to find hw-fence client_id:%d fence:0x%pK ctx:%llu seqno:%llu\n",
+			hw_fence_client->client_id, fence, fence->context, fence->seqno);
+		return -EINVAL;
+	}
+	hw_fence_debug_dump_fence(HW_FENCE_PRINTK, hw_fence, hash, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_dump_fence);
+#endif /* CONFIG_DEBUG_FS */
+
+/* Function used for simulation purposes only. */
+int msm_hw_fence_driver_doorbell_sim(u64 db_mask)
+{
+	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
+		HWFNC_ERR("hw fence driver not ready\n");
+		return -EAGAIN;
+	}
+
+	HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n",
+		db_mask, hw_fence_get_qtime(hw_fence_drv_data));
+
+	hw_fence_utils_process_doorbell_mask(hw_fence_drv_data, db_mask);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_hw_fence_driver_doorbell_sim);
+
+static int msm_hw_fence_probe_init(struct platform_device *pdev)
+{
+	int rc;
+
+	HWFNC_DBG_H("+\n");
+
+	hw_fence_drv_data = kzalloc(sizeof(*hw_fence_drv_data), GFP_KERNEL);
+	if (!hw_fence_drv_data)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, hw_fence_drv_data);
+	hw_fence_drv_data->dev = &pdev->dev;
+
+	if (hw_fence_driver_enable) {
+		/* Initialize HW Fence Driver resources */
+		rc = hw_fence_init(hw_fence_drv_data);
+		if (rc)
+			goto error;
+
+		mutex_init(&hw_fence_drv_data->clients_register_lock);
+
+		/* set ready value so clients can register */
+		hw_fence_drv_data->resources_ready = true;
+	} else {
+		/* Allocate hw fence driver mem pool and share it with HYP */
+		rc = hw_fence_utils_alloc_mem(hw_fence_drv_data);
+		if (rc) {
+			HWFNC_ERR("failed to alloc base memory\n");
+			goto error;
+		}
+
+		HWFNC_DBG_INFO("hw fence driver not enabled\n");
+	}
+
+	HWFNC_DBG_H("-\n");
+
+	return rc;
+
+error:
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(hw_fence_drv_data);
+	hw_fence_drv_data = ERR_PTR(-EPROBE_DEFER);
+
+	HWFNC_ERR("error %d\n", rc);
+	return rc;
+}
+
+static int msm_hw_fence_probe(struct platform_device *pdev)
+{
+	int rc = -EINVAL;
+
+	HWFNC_DBG_H("+\n");
+
+	if (!pdev) {
+		HWFNC_ERR("null platform dev\n");
+		return -EINVAL;
+	}
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence"))
+		rc = msm_hw_fence_probe_init(pdev);
+	if (rc)
+		goto err_exit;
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+
+err_exit:
+	HWFNC_ERR("error %d\n", rc);
+	return rc;
+}
+
+static int msm_hw_fence_remove(struct platform_device *pdev)
+{
+	HWFNC_DBG_H("+\n");
+
+	if (!pdev) {
+		HWFNC_ERR("null platform dev\n");
+		return -EINVAL;
+	}
+
+	hw_fence_drv_data = dev_get_drvdata(&pdev->dev);
+	if (!hw_fence_drv_data) {
+		HWFNC_ERR("null driver data\n");
+		return -EINVAL;
+	}
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	kfree(hw_fence_drv_data);
+	hw_fence_drv_data = ERR_PTR(-EPROBE_DEFER);
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+}
+
+static const struct of_device_id msm_hw_fence_dt_match[] = {
+	{.compatible = "qcom,msm-hw-fence"},
+	{}
+};
+
+static struct platform_driver msm_hw_fence_driver = {
+	.probe = msm_hw_fence_probe,
+	.remove = msm_hw_fence_remove,
+	.driver = {
+		.name = "msm-hw-fence",
+		.of_match_table = of_match_ptr(msm_hw_fence_dt_match),
+	},
+};
+
+static int __init msm_hw_fence_init(void)
+{
+	int rc = 0;
+
+	HWFNC_DBG_H("+\n");
+
+	rc = platform_driver_register(&msm_hw_fence_driver);
+	if (rc) {
+		HWFNC_ERR("%s: failed to register platform driver\n",
+			__func__);
+		return rc;
+	}
+
+	HWFNC_DBG_H("-\n");
+
+	return 0;
+}
+
+static void __exit msm_hw_fence_exit(void)
+{
+	HWFNC_DBG_H("+\n");
+
+	platform_driver_unregister(&msm_hw_fence_driver);
+
+	HWFNC_DBG_H("-\n");
+}
+
+module_param_named(enable, hw_fence_driver_enable, bool, 0600);
+MODULE_PARM_DESC(enable, "Enable hardware fences");
+
+module_init(msm_hw_fence_init);
+module_exit(msm_hw_fence_exit);
+
+MODULE_DESCRIPTION("QTI HW Fence Driver");
+MODULE_LICENSE("GPL v2");

+ 335 - 0
qcom/opensource/mm-drivers/hw_fence/src/msm_hw_fence_synx_translation.c

@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/msm_hw_fence.h>
+#include "msm_hw_fence_synx_translation.h"
+#include "hw_fence_drv_priv.h"
+#include "hw_fence_drv_debug.h"
+
+/**
+ * MAX_SUPPORTED_DPU0:
+ * Maximum client-id offset of the supported DPU clients (CTL0 through CTL5)
+ */
+#define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0)
+
+static int to_synx_status(int hw_fence_status_code)
+{
+	int synx_status_code;
+
+	switch (hw_fence_status_code) {
+	case 0:
+		synx_status_code = SYNX_SUCCESS;
+		break;
+	case -ENOMEM:
+		synx_status_code = -SYNX_NOMEM;
+		break;
+	case -EPERM:
+		synx_status_code = -SYNX_NOPERM;
+		break;
+	case -ETIMEDOUT:
+		synx_status_code = -SYNX_TIMEOUT;
+		break;
+	case -EALREADY:
+		synx_status_code = -SYNX_ALREADY;
+		break;
+	case -ENOENT:
+		synx_status_code = -SYNX_NOENT;
+		break;
+	case -EINVAL:
+		synx_status_code = -SYNX_INVALID;
+		break;
+	case -EBUSY:
+		synx_status_code = -SYNX_BUSY;
+		break;
+	default:
+		synx_status_code = hw_fence_status_code;
+		break;
+	}
+
+	return synx_status_code;
+}
+
+static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id)
+{
+	enum hw_fence_client_id hw_fence_client_id;
+
+	switch ((int)synx_client_id) {
+	case SYNX_CLIENT_HW_FENCE_GFX_CTX0:
+		hw_fence_client_id = HW_FENCE_CLIENT_ID_CTX0;
+		break;
+	case SYNX_CLIENT_HW_FENCE_IPE_CTX0 ... SYNX_CLIENT_HW_FENCE_IPE_CTX0 +
+			SYNX_MAX_SIGNAL_PER_CLIENT - 1:
+		hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPE_CTX0 +
+			HW_FENCE_CLIENT_ID_IPE;
+		break;
+	case SYNX_CLIENT_HW_FENCE_VID_CTX0 ... SYNX_CLIENT_HW_FENCE_VID_CTX0 +
+			SYNX_MAX_SIGNAL_PER_CLIENT - 1:
+		hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_VID_CTX0 +
+			HW_FENCE_CLIENT_ID_VPU;
+		break;
+	case SYNX_CLIENT_HW_FENCE_DPU0_CTL0 ... SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + MAX_SUPPORTED_DPU0:
+		hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 +
+			HW_FENCE_CLIENT_ID_CTL0;
+		break;
+	case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... SYNX_CLIENT_HW_FENCE_IFE7_CTX0 +
+			SYNX_MAX_SIGNAL_PER_CLIENT - 1:
+		hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 +
+			HW_FENCE_CLIENT_ID_IFE0;
+		break;
+	default:
+		HWFNC_ERR("Unsupported hw-fence client for synx_id:%d\n", synx_client_id);
+		hw_fence_client_id = HW_FENCE_CLIENT_MAX;
+		break;
+	}
+
+	return hw_fence_client_id;
+}
+
+static bool is_hw_fence_client(enum synx_client_id synx_client_id)
+{
+	return synx_client_id >= SYNX_HW_FENCE_CLIENT_START
+		&& synx_client_id < SYNX_HW_FENCE_CLIENT_END;
+}
+
+struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params)
+{
+	struct synx_session *session = NULL;
+	enum hw_fence_client_id client_id;
+	void *client_handle;
+
+	if (!hw_fence_driver_enable)
+		return ERR_PTR(-SYNX_INVALID);
+
+	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->ptr)) {
+		HWFNC_ERR("invalid params:0x%pK params->ptr:0x%pK\n", params,
+			IS_ERR_OR_NULL(params) ? NULL : params->ptr);
+		return ERR_PTR(-SYNX_INVALID);
+	}
+
+	client_id = _get_hw_fence_client_id(params->id);
+	if (!is_hw_fence_client(params->id) || client_id == HW_FENCE_CLIENT_MAX) {
+		HWFNC_ERR("Initializing session for invalid synx_id:%d\n", params->id);
+		return ERR_PTR(-SYNX_INVALID);
+	}
+
+	session = kzalloc(sizeof(struct synx_session), GFP_KERNEL);
+	if (!session)
+		return ERR_PTR(-SYNX_NOMEM);
+
+	client_handle = msm_hw_fence_register(client_id,
+		(struct msm_hw_fence_mem_addr *)params->ptr);
+	if (IS_ERR_OR_NULL(client_handle)) {
+		kfree(session);
+		HWFNC_ERR("failed to initialize synx_id:%d ret:%d\n", params->id,
+			PTR_ERR(client_handle));
+		return ERR_PTR(to_synx_status(PTR_ERR(client_handle)));
+	}
+	session->client = client_handle;
+	session->type = params->id;
+	HWFNC_DBG_INIT("initialized session synx_id:%d hw_fence_id:%d\n", params->id, client_id);
+
+	return session;
+}
+EXPORT_SYMBOL(synx_hwfence_initialize);
+
+int synx_hwfence_uninitialize(struct synx_session *session)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) {
+		HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
+			IS_ERR_OR_NULL(session) ? -1 : session->type);
+		return -SYNX_INVALID;
+	}
+
+	ret = msm_hw_fence_deregister(session->client);
+	if (ret)
+		HWFNC_ERR("Failed to deregister synx_id:%d ret:%d\n", session->type, ret);
+	else
+		kfree(session);
+
+	return to_synx_status(ret);
+}
+EXPORT_SYMBOL(synx_hwfence_uninitialize);
+
+int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params)
+{
+	int ret = 0;
+	struct msm_hw_fence_create_params hwfence_params;
+	u64 handle;
+
+	if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) ||
+			IS_ERR_OR_NULL(params)) {
+		HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session,
+			IS_ERR_OR_NULL(session) ? -1 : session->type, params);
+		return -SYNX_INVALID;
+	}
+
+	if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) ||
+			!(params->flags & SYNX_CREATE_DMA_FENCE) ||
+			(params->flags & SYNX_CREATE_CSL_FENCE) ||
+			IS_ERR_OR_NULL(params->fence)) {
+		HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n",
+			session->type, params->h_synx, params->flags, params->fence);
+		return -SYNX_INVALID;
+	}
+
+	hwfence_params.fence = params->fence;
+	hwfence_params.handle = &handle;
+	ret = msm_hw_fence_create(session->client, &hwfence_params);
+	if (ret) {
+		HWFNC_ERR("synx_id:%d failed create fence:0x%pK flags:0x%x ret:%d\n", session->type,
+			params->fence, params->flags, ret);
+		return to_synx_status(ret);
+	}
+	if (handle > U32_MAX) {
+		HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type,
+			handle);
+		msm_hw_fence_destroy_with_handle(session->client, handle);
+		return -SYNX_INVALID;
+	}
+	*params->h_synx = handle;
+
+	return SYNX_SUCCESS;
+}
+EXPORT_SYMBOL(synx_hwfence_create);
+
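A usage sketch of the translation layer, with field names taken from the parameter checks above; the GFX client id is illustrative, and releasing the handle right after creating it is only for brevity:

```c
#include "msm_hw_fence_synx_translation.h"

static int example_synx_create(struct dma_fence *fence,
		struct msm_hw_fence_mem_addr *mem, u32 *out_handle)
{
	struct synx_initialization_params init = { 0 };
	struct synx_create_params create = { 0 };
	struct synx_session *session;
	int ret;

	init.id = SYNX_CLIENT_HW_FENCE_GFX_CTX0;
	init.ptr = mem;
	session = synx_hwfence_initialize(&init);
	if (IS_ERR_OR_NULL(session))
		return PTR_ERR(session);

	create.h_synx = out_handle;
	create.flags = SYNX_CREATE_DMA_FENCE;
	create.fence = fence;
	ret = synx_hwfence_create(session, &create);
	if (!ret)
		ret = synx_hwfence_release(session, *out_handle);

	synx_hwfence_uninitialize(session);
	return ret;
}
```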
+int synx_hwfence_release(struct synx_session *session, u32 h_synx)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) {
+		HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
+			IS_ERR_OR_NULL(session) ? -1 : session->type);
+		return -SYNX_INVALID;
+	}
+
+	ret = msm_hw_fence_destroy_with_handle(session->client, h_synx);
+	if (ret)
+		HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type,
+			h_synx, ret);
+
+	return to_synx_status(ret);
+}
+EXPORT_SYMBOL(synx_hwfence_release);
+
+int synx_hwfence_signal(struct synx_session *session, u32 h_synx, enum synx_signal_status status)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) {
+		HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
+			IS_ERR_OR_NULL(session) ? -1 : session->type);
+		return -SYNX_INVALID;
+	}
+
+	ret = msm_hw_fence_update_txq(session->client, h_synx, 0, (u32)status);
+	if (ret)
+		HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n",
+			session->type, h_synx, status, ret);
+
+	return to_synx_status(ret);
+}
+EXPORT_SYMBOL(synx_hwfence_signal);
+
+int synx_hwfence_recover(enum synx_client_id id)
+{
+	int ret;
+
+	if (!is_hw_fence_client(id)) {
+		HWFNC_ERR("invalid synx_id:%d\n", id);
+		return -SYNX_INVALID;
+	}
+
+	ret = msm_hw_fence_reset_client_by_id(_get_hw_fence_client_id(id),
+		MSM_HW_FENCE_RESET_WITHOUT_DESTROY);
+	if (ret)
+		HWFNC_ERR("synx_id:%d failed to recover ret:%d\n", id, ret);
+
+	return to_synx_status(ret);
+}
+EXPORT_SYMBOL(synx_hwfence_recover);
+
+static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params)
+{
+	u64 handle;
+	int ret;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
+			IS_ERR_OR_NULL(params->new_h_synx) ||
+			!(params->flags & SYNX_IMPORT_DMA_FENCE) ||
+			(params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) {
+		HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n",
+			client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx,
+			IS_ERR_OR_NULL(params) ? 0 : params->flags,
+			IS_ERR_OR_NULL(params) ? NULL : params->fence);
+		return -SYNX_INVALID;
+	}
+
+	ret = msm_hw_fence_wait_update_v2(client, (struct dma_fence **)&params->fence, &handle,
+		NULL, 1, true);
+	if (ret) {
+		HWFNC_ERR("failed to import fence:0x%pK flags:0x%x ret:%d\n", params->fence,
+			params->flags, ret);
+		return to_synx_status(ret);
+	}
+	if (handle > U32_MAX) {
+		HWFNC_ERR("fence handle:%llu would overflow new_h_synx\n", handle);
+		msm_hw_fence_wait_update_v2(client, (struct dma_fence **)&params->fence, &handle,
+			NULL, 1, false);
+		return -SYNX_INVALID;
+	}
+	*params->new_h_synx = handle;
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params)
+{
+	int i, ret;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || !params->num_fences) {
+		HWFNC_ERR("invalid import arr client:0x%pK params:0x%pK num_fences:%u\n", client,
+			params, IS_ERR_OR_NULL(params) ? -1 : params->num_fences);
+		return -SYNX_INVALID;
+	}
+
+	for (i = 0; i < params->num_fences; i++) {
+		ret = synx_hwfence_import_indv(client, &params->list[i]);
+		if (ret) {
+			HWFNC_ERR("importing fence[%u] 0x%pK failed ret:%d\n", i,
+				params->list[i].fence, ret);
+			return ret;
+		}
+	}
+
+	return SYNX_SUCCESS;
+}
+
+int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)
+			|| IS_ERR_OR_NULL(params)) {
+		HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session,
+			IS_ERR_OR_NULL(session) ? -1 : session->type, params);
+		return -SYNX_INVALID;
+	}
+
+	if (params->type == SYNX_IMPORT_ARR_PARAMS)
+		ret = synx_hwfence_import_arr(session->client, &params->arr);
+	else
+		ret = synx_hwfence_import_indv(session->client, &params->indv);
+
+	if (ret)
+		HWFNC_ERR("synx_id:%d failed to import type:%s fences ret:%d\n", session->type,
+			(params->type == SYNX_IMPORT_ARR_PARAMS) ? "arr" : "indv", ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(synx_hwfence_import);
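For the individual-import path, a hedged caller sketch; SYNX_IMPORT_INDV_PARAMS is assumed to be the non-array counterpart of SYNX_IMPORT_ARR_PARAMS in the synx header:

```c
static int example_synx_import(struct synx_session *session,
		struct dma_fence *fence, u32 *new_handle)
{
	struct synx_import_params params = { 0 };

	params.type = SYNX_IMPORT_INDV_PARAMS;	/* assumed enum value */
	params.indv.fence = fence;
	params.indv.new_h_synx = new_handle;
	params.indv.flags = SYNX_IMPORT_DMA_FENCE;

	return synx_hwfence_import(session, &params);
}
```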

+ 26 - 0
qcom/opensource/mm-drivers/mm_driver_board.mk

@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+MM_DRV_DLKM_ENABLE := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+	ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
+		MM_DRV_DLKM_ENABLE := false
+	endif
+endif
+
+ifeq ($(MM_DRV_DLKM_ENABLE), true)
+	ifneq ($(TARGET_BOARD_AUTO),true)
+		ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
+			BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
+			BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
+			BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
+			ifneq ($(TARGET_BOARD_PLATFORM), taro)
+				BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
+					       $(KERNEL_MODULES_OUT)/msm_hw_fence.ko
+				BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
+					               $(KERNEL_MODULES_OUT)/msm_hw_fence.ko
+				BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \
+					                             $(KERNEL_MODULES_OUT)/msm_hw_fence.ko
+			endif
+		endif
+	endif
+endif

+ 17 - 0
qcom/opensource/mm-drivers/mm_driver_product.mk

@@ -0,0 +1,17 @@
+
+PRODUCT_PACKAGES += msm_ext_display.ko
+
+MM_DRV_DLKM_ENABLE := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+	ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
+		MM_DRV_DLKM_ENABLE := false
+	endif
+endif
+
+ifeq ($(MM_DRV_DLKM_ENABLE), true)
+	ifneq ($(TARGET_BOARD_PLATFORM), taro)
+		PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko
+	endif
+endif
+
+DISPLAY_MM_DRIVER := msm_ext_display.ko sync_fence.ko msm_hw_fence.ko

+ 95 - 0
qcom/opensource/mm-drivers/mm_drivers_kernel_headers.py

@@ -0,0 +1,95 @@
+# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published by
+# the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import filecmp
+import os
+import re
+import subprocess
+import sys
+
+def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
+    if not h.startswith(prefix):
+        print('error: expected prefix [%s] on header [%s]' % (prefix, h))
+        return False
+
+    out_h = os.path.join(gen_dir, h[len(prefix):])
+    (out_h_dirname, out_h_basename) = os.path.split(out_h)
+    env = os.environ.copy()
+    env["LOC_UNIFDEF"] = unifdef
+    cmd = ["sh", headers_install, h, out_h]
+
+    if verbose:
+        print('run_headers_install: cmd is %s' % cmd)
+
+    result = subprocess.call(cmd, env=env)
+
+    if result != 0:
+        print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
+        return False
+    return True
+
+def gen_mm_drivers_headers(verbose, gen_dir, headers_install, unifdef, mm_drivers_include_uapi):
+    error_count = 0
+    for h in mm_drivers_include_uapi:
+        mm_drivers_uapi_include_prefix = os.path.join(
+                h.split('sync_fence/include/uapi')[0],
+                'sync_fence', 'include', 'uapi') + os.sep
+        if not run_headers_install(
+                verbose, gen_dir, headers_install, unifdef,
+                mm_drivers_uapi_include_prefix, h): error_count += 1
+    return error_count
+
+def main():
+    """Parse command line arguments and perform top level control."""
+    parser = argparse.ArgumentParser(
+            description=__doc__,
+            formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    # Arguments that apply to every invocation of this script.
+    parser.add_argument(
+            '--verbose', action='store_true',
+            help='Print output that describes the workings of this script.')
+    parser.add_argument(
+            '--header_arch', required=True,
+            help='The arch for which to generate headers.')
+    parser.add_argument(
+            '--gen_dir', required=True,
+            help='Where to place the generated files.')
+    parser.add_argument(
+            '--mm_drivers_include_uapi', required=True, nargs='*',
+            help='The list of techpack/*/include/uapi header files.')
+    parser.add_argument(
+            '--headers_install', required=True,
+            help='The headers_install tool to process input headers.')
+    parser.add_argument(
+              '--unifdef',
+              required=True,
+              help='The unifdef tool used by headers_install.')
+
+    args = parser.parse_args()
+
+    if args.verbose:
+        print('header_arch [%s]' % args.header_arch)
+        print('gen_dir [%s]' % args.gen_dir)
+        print('mm_drivers_include_uapi [%s]' % args.mm_drivers_include_uapi)
+        print('headers_install [%s]' % args.headers_install)
+        print('unifdef [%s]' % args.unifdef)
+
+    return gen_mm_drivers_headers(args.verbose, args.gen_dir,
+            args.headers_install, args.unifdef, args.mm_drivers_include_uapi)
+
+if __name__ == '__main__':
+    sys.exit(main())
+

+ 43 - 0
qcom/opensource/mm-drivers/msm_ext_display/Android.mk

@@ -0,0 +1,43 @@
+LOCAL_PATH := $(call my-dir)
+LOCAL_MODULE_DDK_BUILD := true
+include $(CLEAR_VARS)
+
+# This makefile is only for DLKM
+ifneq ($(findstring vendor,$(LOCAL_PATH)),)
+
+ifneq ($(findstring opensource,$(LOCAL_PATH)),)
+	MSM_EXT_DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/msm_ext_display
+endif # opensource
+
+DLKM_DIR := $(TOP)/device/qcom/common/dlkm
+
+LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+
+###########################################################
+# This is set once per LOCAL_PATH, not per (kernel) module
+KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR)
+KBUILD_OPTIONS += MODNAME=msm_ext_display
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+
+###########################################################
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := msm-ext-disp-module-symvers
+LOCAL_MODULE_STEM         := Module.symvers
+LOCAL_MODULE_KBUILD_NAME  := Module.symvers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###########################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := msm_ext_display.ko
+LOCAL_MODULE_KBUILD_NAME  := msm_ext_display.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###########################################################
+endif # DLKM check

+ 10 - 0
qcom/opensource/mm-drivers/msm_ext_display/BUILD.bazel

@@ -0,0 +1,10 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+load(":define_msm_ext_display.bzl", "define_msm_ext_display")
+
+package(
+    default_visibility = [
+      "//visibility:public"
+    ],
+)
+
+define_msm_ext_display()

+ 12 - 0
qcom/opensource/mm-drivers/msm_ext_display/Kbuild

@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdrivers.conf
+LINUXINCLUDE += -include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdriversconf.h
+
+obj-m += msm_ext_display.o
+
+msm_ext_display-y := src/msm_ext_display.o
+
+CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
+EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
+		-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

+ 4 - 0
qcom/opensource/mm-drivers/msm_ext_display/Kconfig

@@ -0,0 +1,4 @@
+config MSM_EXT_DISPLAY
+	bool "Enable msm_ext_display"
+	help
+	  Enable msm_ext_display driver

+ 15 - 0
qcom/opensource/mm-drivers/msm_ext_display/Makefile

@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KBUILD_OPTIONS += MSM_EXT_DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../
+
+all: modules
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
+
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 1 - 0
qcom/opensource/mm-drivers/msm_ext_display/defconfig

@@ -0,0 +1 @@
+CONFIG_MSM_EXT_DISPLAY=y

+ 31 - 0
qcom/opensource/mm-drivers/msm_ext_display/define_msm_ext_display.bzl

@@ -0,0 +1,31 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule")
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+load("//msm-kernel:target_variants.bzl", "get_all_variants")
+
+def _define_module(target, variant):
+    tv = "{}_{}".format(target, variant)
+    ddk_module(
+        name = "{}_msm_ext_display".format(tv),
+        srcs = ["src/msm_ext_display.c"],
+        out = "msm_ext_display.ko",
+        defconfig = "defconfig",
+        kconfig = "Kconfig",
+        deps = ["//msm-kernel:all_headers",
+                "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers"],
+        kernel_build = "//msm-kernel:{}".format(tv),
+    )
+
+    copy_to_dist_dir(
+        name = "{}_msm_ext_display_dist".format(tv),
+        data = [":{}_msm_ext_display".format(tv)],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+        log = "info",
+    )
+
+def define_msm_ext_display():
+    for (t, v) in get_all_variants():
+        _define_module(t, v)

+ 702 - 0
qcom/opensource/mm-drivers/msm_ext_display/src/msm_ext_display.c

@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/extcon-provider.h>
+#include <linux/soc/qcom/msm_ext_display.h>
+
+struct msm_ext_disp_list {
+	struct msm_ext_disp_init_data *data;
+	struct list_head list;
+};
+
+struct msm_ext_disp {
+	struct msm_ext_disp_data ext_disp_data;
+	struct platform_device *pdev;
+	struct msm_ext_disp_codec_id current_codec;
+	struct msm_ext_disp_audio_codec_ops *ops;
+	struct extcon_dev *audio_sdev[MSM_EXT_DISP_MAX_CODECS];
+	bool audio_session_on;
+	struct list_head display_list;
+	struct mutex lock;
+	bool update_audio;
+};
+
+static const unsigned int msm_ext_disp_supported_cable[] = {
+	EXTCON_DISP_DP,
+	EXTCON_DISP_HDMI,
+	EXTCON_NONE,
+};
+
+static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp, int id)
+{
+	int ret = 0;
+
+	if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	ext_disp->audio_sdev[id] = devm_extcon_dev_allocate(
+			&ext_disp->pdev->dev,
+			msm_ext_disp_supported_cable);
+	if (IS_ERR(ext_disp->audio_sdev[id]))
+		return PTR_ERR(ext_disp->audio_sdev[id]);
+
+	ret = devm_extcon_dev_register(&ext_disp->pdev->dev,
+		ext_disp->audio_sdev[id]);
+	if (ret) {
+		pr_err("audio registration failed\n");
+		return ret;
+	}
+
+	pr_debug("extcon registration done\n");
+
+	return ret;
+}
+
+static void msm_ext_disp_extcon_unregister(struct msm_ext_disp *ext_disp,
+		int id)
+{
+	if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	devm_extcon_dev_unregister(&ext_disp->pdev->dev,
+			ext_disp->audio_sdev[id]);
+}
+
+static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
+{
+	switch (type) {
+	case EXT_DISPLAY_TYPE_HDMI:
+		return "EXT_DISPLAY_TYPE_HDMI";
+	case EXT_DISPLAY_TYPE_DP:
+		return "EXT_DISPLAY_TYPE_DP";
+	default: return "???";
+	}
+}
+
+static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
+		struct msm_ext_disp_init_data *data)
+{
+	struct msm_ext_disp_list *node;
+
+	if (!ext_disp || !data) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->data = data;
+
+	list_add(&node->list, &ext_disp->display_list);
+
+	pr_debug("Added new display (%s) ctld (%d) stream (%d)\n",
+		msm_ext_disp_name(data->codec.type),
+		data->codec.ctrl_id, data->codec.stream_id);
+
+	return 0;
+}
+
+static int msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp,
+		struct msm_ext_disp_init_data *data)
+{
+	struct msm_ext_disp_list *node;
+	struct list_head *pos = NULL;
+
+	if (!ext_disp || !data) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	list_for_each(pos, &ext_disp->display_list) {
+		node = list_entry(pos, struct msm_ext_disp_list, list);
+		if (node->data == data) {
+			list_del(pos);
+			pr_debug("Deleted the intf data\n");
+			kfree(node);
+			return 0;
+		}
+	}
+
+	pr_debug("Intf data not present for delete op\n");
+
+	return 0;
+}
+
+static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
+		struct msm_ext_disp_codec_id *codec,
+		struct msm_ext_disp_init_data **data)
+{
+	int ret = 0;
+	struct msm_ext_disp_list *node;
+	struct list_head *position = NULL;
+
+	if (!ext_disp || !data || !codec) {
+		pr_err("Invalid params\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	*data = NULL;
+	list_for_each(position, &ext_disp->display_list) {
+		node = list_entry(position, struct msm_ext_disp_list, list);
+		if (node->data->codec.type == codec->type &&
+			node->data->codec.stream_id == codec->stream_id &&
+			node->data->codec.ctrl_id == codec->ctrl_id) {
+			*data = node->data;
+			break;
+		}
+	}
+
+	if (!*data)
+		ret = -ENODEV;
+end:
+	return ret;
+}
+
+static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
+		struct msm_ext_disp_codec_id *codec,
+		enum msm_ext_disp_cable_state new_state)
+{
+	int ret = 0;
+	int state;
+	struct extcon_dev *audio_sdev;
+
+	if (!ext_disp->ops) {
+		pr_err("codec not registered, skip notification\n");
+		ret = -EPERM;
+		goto end;
+	}
+
+	audio_sdev = ext_disp->audio_sdev[codec->stream_id];
+
+	state = extcon_get_state(audio_sdev, codec->type);
+	if (state == !!new_state) {
+		ret = -EEXIST;
+		pr_debug("same state\n");
+		goto end;
+	}
+
+	ret = extcon_set_state_sync(audio_sdev,
+			codec->type, !!new_state);
+	if (ret)
+		pr_err("Failed to set state. Error = %d\n", ret);
+	else
+		pr_debug("state changed to %d\n", new_state);
+
+end:
+	return ret;
+}
+
+static struct msm_ext_disp *msm_ext_disp_validate_and_get(
+		struct platform_device *pdev,
+		struct msm_ext_disp_codec_id *codec,
+		enum msm_ext_disp_cable_state state)
+{
+	struct msm_ext_disp_data *ext_disp_data;
+	struct msm_ext_disp *ext_disp;
+
+	if (!pdev) {
+		pr_err("invalid platform device\n");
+		goto err;
+	}
+
+	if (!codec ||
+		codec->type >= EXT_DISPLAY_TYPE_MAX ||
+		codec->ctrl_id != 0 ||
+		codec->stream_id >= MSM_EXT_DISP_MAX_CODECS) {
+		pr_err("invalid display codec id\n");
+		goto err;
+	}
+
+	if (state < EXT_DISPLAY_CABLE_DISCONNECT ||
+			state >= EXT_DISPLAY_CABLE_STATE_MAX) {
+		pr_err("invalid HPD state (%d)\n", state);
+		goto err;
+	}
+
+	ext_disp_data = platform_get_drvdata(pdev);
+	if (!ext_disp_data) {
+		pr_err("invalid drvdata\n");
+		goto err;
+	}
+
+	ext_disp = container_of(ext_disp_data,
+			struct msm_ext_disp, ext_disp_data);
+
+	return ext_disp;
+err:
+	return ERR_PTR(-EINVAL);
+}
+
+static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
+		struct msm_ext_disp_codec_id *codec)
+{
+	int ret = 0;
+	struct msm_ext_disp_init_data *data = NULL;
+
+	ret = msm_ext_disp_get_intf_data(ext_disp, codec, &data);
+	if (ret || !data) {
+		pr_err("Display not found (%s) ctld (%d) stream (%d)\n",
+			msm_ext_disp_name(codec->type),
+			codec->ctrl_id, codec->stream_id);
+		goto end;
+	}
+
+	if (ext_disp->ops) {
+		*ext_disp->ops = data->codec_ops;
+		ext_disp->current_codec = *codec;
+
+		/* update pdev for interface to use */
+		ext_disp->ext_disp_data.intf_pdev = data->pdev;
+		ext_disp->ext_disp_data.intf_data = data->intf_data;
+	}
+
+end:
+	return ret;
+}
+
+static int msm_ext_disp_audio_config(struct platform_device *pdev,
+		struct msm_ext_disp_codec_id *codec,
+		enum msm_ext_disp_cable_state state)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp;
+
+	ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
+	if (IS_ERR(ext_disp)) {
+		ret = PTR_ERR(ext_disp);
+		goto end;
+	}
+
+	if (state == EXT_DISPLAY_CABLE_CONNECT) {
+		ret = msm_ext_disp_select_audio_codec(pdev, codec);
+	} else {
+		mutex_lock(&ext_disp->lock);
+		if (ext_disp->ops)
+			memset(ext_disp->ops, 0, sizeof(*ext_disp->ops));
+
+		pr_debug("codec ops cleared for %s\n",
+			msm_ext_disp_name(ext_disp->current_codec.type));
+
+		ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
+		mutex_unlock(&ext_disp->lock);
+	}
+end:
+	return ret;
+}
+
+static int msm_ext_disp_audio_notify(struct platform_device *pdev,
+		struct msm_ext_disp_codec_id *codec,
+		enum msm_ext_disp_cable_state state)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp;
+
+	ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
+	if (IS_ERR(ext_disp)) {
+		ret = PTR_ERR(ext_disp);
+		goto end;
+	}
+
+	mutex_lock(&ext_disp->lock);
+	ret = msm_ext_disp_process_audio(ext_disp, codec, state);
+	mutex_unlock(&ext_disp->lock);
+end:
+	return ret;
+}
+
+static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp)
+{
+	int ret;
+	struct msm_ext_disp_init_data *data = NULL;
+
+	if (!ext_disp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	ret = msm_ext_disp_get_intf_data(ext_disp,
+			&ext_disp->current_codec, &data);
+	if (ret) {
+		pr_err("%s not found\n",
+			msm_ext_disp_name(ext_disp->current_codec.type));
+		return;
+	}
+
+	*ext_disp->ops = data->codec_ops;
+	data->codec_ops.ready(ext_disp->pdev);
+}
+
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+		struct msm_ext_disp_audio_codec_ops *ops)
+{
+	return msm_ext_disp_register_audio_codec(pdev, ops);
+}
+
+/**
+ * msm_ext_disp_register_audio_codec() - register audio codec ops
+ * @pdev: platform device of the msm-ext-disp driver
+ * @ops: audio codec ops, populated by the display on cable connect
+ *
+ * Registers audio codec ops with the display driver for
+ * HDMI/DisplayPort use-case support.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
+		struct msm_ext_disp_audio_codec_ops *ops)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+	struct msm_ext_disp_data *ext_disp_data = NULL;
+
+	if (!pdev || !ops) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ext_disp_data = platform_get_drvdata(pdev);
+	if (!ext_disp_data) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
+				ext_disp_data);
+
+	mutex_lock(&ext_disp->lock);
+
+	if (ext_disp->ops) {
+		pr_err("Codec already registered\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ext_disp->ops = ops;
+
+	pr_debug("audio codec registered\n");
+
+	if (ext_disp->update_audio) {
+		ext_disp->update_audio = false;
+		msm_ext_disp_update_audio_ops(ext_disp, &ext_disp->current_codec);
+		msm_ext_disp_process_audio(ext_disp, &ext_disp->current_codec,
+				EXT_DISPLAY_CABLE_CONNECT);
+	}
+
+end:
+	mutex_unlock(&ext_disp->lock);
+	if (ext_disp->current_codec.type != EXT_DISPLAY_TYPE_MAX)
+		msm_ext_disp_ready_for_display(ext_disp);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_ext_disp_register_audio_codec);
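For reference, a minimal sketch of the codec side of this API, assuming a codec driver that has already resolved the msm-ext-disp platform device; the example_codec_bind name is illustrative and not part of this patch. The ops struct is registered empty and must outlive the registration, since the display side populates it when a cable connects:

/*
 * Hypothetical sketch, not part of this patch; assumes the msm-ext-disp
 * public header declaring these types is included.
 */
static struct msm_ext_disp_audio_codec_ops codec_ops;

static int example_codec_bind(struct platform_device *ext_disp_pdev)
{
	int ret;

	/* register an empty ops struct; it is filled in on cable connect */
	ret = msm_ext_disp_register_audio_codec(ext_disp_pdev, &codec_ops);
	if (ret)
		pr_err("codec registration failed, ret=%d\n", ret);

	return ret;
}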
+
+int msm_ext_disp_select_audio_codec(struct platform_device *pdev,
+		struct msm_ext_disp_codec_id *codec)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+	struct msm_ext_disp_data *ext_disp_data = NULL;
+
+	if (!pdev || !codec) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ext_disp_data = platform_get_drvdata(pdev);
+	if (!ext_disp_data) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
+				ext_disp_data);
+
+	mutex_lock(&ext_disp->lock);
+
+	if (!ext_disp->ops) {
+		pr_warn("Codec is not registered\n");
+		ext_disp->update_audio = true;
+		ext_disp->current_codec = *codec;
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = msm_ext_disp_update_audio_ops(ext_disp, codec);
+
+end:
+	mutex_unlock(&ext_disp->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_ext_disp_select_audio_codec);
+
+static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
+{
+	struct msm_ext_disp_audio_codec_ops *ops;
+
+	if (!init_data) {
+		pr_err("Invalid init_data\n");
+		return -EINVAL;
+	}
+
+	if (!init_data->pdev) {
+		pr_err("Invalid display intf pdev\n");
+		return -EINVAL;
+	}
+
+	if (init_data->codec.type >= EXT_DISPLAY_TYPE_MAX ||
+		init_data->codec.ctrl_id != 0 ||
+		init_data->codec.stream_id >= MSM_EXT_DISP_MAX_CODECS) {
+		pr_err("Invalid codec info type(%d), ctrl(%d) stream(%d)\n",
+				init_data->codec.type,
+				init_data->codec.ctrl_id,
+				init_data->codec.stream_id);
+		return -EINVAL;
+	}
+
+	ops = &init_data->codec_ops;
+
+	if (!ops->audio_info_setup || !ops->get_audio_edid_blk ||
+			!ops->cable_status || !ops->get_intf_id ||
+			!ops->teardown_done || !ops->acknowledge ||
+			!ops->ready) {
+		pr_err("Invalid codec operation pointers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int msm_ext_disp_register_intf(struct platform_device *pdev,
+		struct msm_ext_disp_init_data *init_data)
+{
+	int ret = 0;
+	struct msm_ext_disp_init_data *data = NULL;
+	struct msm_ext_disp *ext_disp = NULL;
+	struct msm_ext_disp_data *ext_disp_data = NULL;
+
+	if (!pdev || !init_data) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ext_disp_data = platform_get_drvdata(pdev);
+	if (!ext_disp_data) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
+				ext_disp_data);
+
+	mutex_lock(&ext_disp->lock);
+
+	ret = msm_ext_disp_validate_intf(init_data);
+	if (ret)
+		goto end;
+
+	ret = msm_ext_disp_get_intf_data(ext_disp, &init_data->codec, &data);
+	if (!ret) {
+		pr_err("%s already registered. ctrl(%d) stream(%d)\n",
+			msm_ext_disp_name(init_data->codec.type),
+			init_data->codec.ctrl_id,
+			init_data->codec.stream_id);
+		goto end;
+	}
+
+	ret = msm_ext_disp_add_intf_data(ext_disp, init_data);
+	if (ret)
+		goto end;
+
+	init_data->intf_ops.audio_config = msm_ext_disp_audio_config;
+	init_data->intf_ops.audio_notify = msm_ext_disp_audio_notify;
+
+	pr_debug("%s registered. ctrl(%d) stream(%d)\n",
+			msm_ext_disp_name(init_data->codec.type),
+			init_data->codec.ctrl_id,
+			init_data->codec.stream_id);
+end:
+	mutex_unlock(&ext_disp->lock);
+	return ret;
+}
+EXPORT_SYMBOL(msm_ext_disp_register_intf);
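A minimal sketch of the display side, assuming a DP driver with its own dp_audio_* callbacks; every dp_* name here is hypothetical, and all seven callbacks are mandatory per msm_ext_disp_validate_intf() above. init_data must stay allocated while registered; intf_ops is filled in by msm_ext_disp_register_intf():

/* Hypothetical sketch, not part of this patch. */
static int example_dp_register(struct platform_device *ext_disp_pdev,
		struct platform_device *dp_pdev,
		struct msm_ext_disp_init_data *init_data)
{
	init_data->pdev = dp_pdev;
	init_data->codec.type = EXT_DISPLAY_TYPE_DP;
	init_data->codec.ctrl_id = 0;		/* must be 0 */
	init_data->codec.stream_id = 0;		/* < MSM_EXT_DISP_MAX_CODECS */

	/* all seven callbacks are mandatory */
	init_data->codec_ops.audio_info_setup = dp_audio_info_setup;
	init_data->codec_ops.get_audio_edid_blk = dp_get_audio_edid_blk;
	init_data->codec_ops.cable_status = dp_cable_status;
	init_data->codec_ops.get_intf_id = dp_get_intf_id;
	init_data->codec_ops.teardown_done = dp_teardown_done;
	init_data->codec_ops.acknowledge = dp_acknowledge;
	init_data->codec_ops.ready = dp_ready;

	return msm_ext_disp_register_intf(ext_disp_pdev, init_data);
}

On hotplug, such a driver would then invoke init_data->intf_ops.audio_config() and init_data->intf_ops.audio_notify() with EXT_DISPLAY_CABLE_CONNECT or EXT_DISPLAY_CABLE_DISCONNECT.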
+
+int msm_ext_disp_deregister_intf(struct platform_device *pdev,
+		struct msm_ext_disp_init_data *init_data)
+{
+	int ret = 0;
+	struct msm_ext_disp *ext_disp = NULL;
+	struct msm_ext_disp_data *ext_disp_data = NULL;
+
+	if (!pdev || !init_data) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ext_disp_data = platform_get_drvdata(pdev);
+	if (!ext_disp_data) {
+		pr_err("Invalid drvdata\n");
+		return -EINVAL;
+	}
+
+	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
+				ext_disp_data);
+
+	mutex_lock(&ext_disp->lock);
+
+	ret = msm_ext_disp_remove_intf_data(ext_disp, init_data);
+	if (ret)
+		goto end;
+
+	init_data->intf_ops.audio_config = NULL;
+	init_data->intf_ops.audio_notify = NULL;
+
+	pr_debug("%s deregistered\n",
+			msm_ext_disp_name(init_data->codec.type));
+end:
+	mutex_unlock(&ext_disp->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_ext_disp_deregister_intf);
+
+static int msm_ext_disp_probe(struct platform_device *pdev)
+{
+	int ret = 0, id;
+	struct device_node *of_node = NULL;
+	struct msm_ext_disp *ext_disp = NULL;
+
+	if (!pdev) {
+		pr_err("No platform device found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	of_node = pdev->dev.of_node;
+	if (!of_node) {
+		pr_err("No device node found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL);
+	if (!ext_disp) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	platform_set_drvdata(pdev, &ext_disp->ext_disp_data);
+	ext_disp->pdev = pdev;
+
+	for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) {
+		ret = msm_ext_disp_extcon_register(ext_disp, id);
+		if (ret)
+			goto child_node_failure;
+	}
+
+	ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
+	if (ret) {
+		pr_err("Failed to add child devices. Error = %d\n", ret);
+		goto child_node_failure;
+	}
+
+	pr_debug("Added child devices\n");
+
+	mutex_init(&ext_disp->lock);
+
+	INIT_LIST_HEAD(&ext_disp->display_list);
+	ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
+	ext_disp->update_audio = false;
+
+	return ret;
+
+child_node_failure:
+	for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++)
+		msm_ext_disp_extcon_unregister(ext_disp, id);
+
+	devm_kfree(&ext_disp->pdev->dev, ext_disp);
+end:
+	return ret;
+}
+
+static int msm_ext_disp_remove(struct platform_device *pdev)
+{
+	int ret = 0, id;
+	struct msm_ext_disp *ext_disp = NULL;
+	struct msm_ext_disp_data *ext_disp_data = NULL;
+
+	if (!pdev) {
+		pr_err("No platform device\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ext_disp_data = platform_get_drvdata(pdev);
+	if (!ext_disp_data) {
+		pr_err("No drvdata found\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
+				ext_disp_data);
+
+	for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++)
+		msm_ext_disp_extcon_unregister(ext_disp, id);
+
+	mutex_destroy(&ext_disp->lock);
+	devm_kfree(&ext_disp->pdev->dev, ext_disp);
+
+end:
+	return ret;
+}
+
+static const struct of_device_id msm_ext_dt_match[] = {
+	{.compatible = "qcom,msm-ext-disp",},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, msm_ext_dt_match);
+
+static struct platform_driver this_driver = {
+	.probe = msm_ext_disp_probe,
+	.remove = msm_ext_disp_remove,
+	.driver = {
+		.name = "msm-ext-disp",
+		.of_match_table = msm_ext_dt_match,
+	},
+};
+
+static int __init msm_ext_disp_init(void)
+{
+	int ret = 0;
+
+	ret = platform_driver_register(&this_driver);
+	if (ret)
+		pr_err("failed, ret = %d\n", ret);
+
+	return ret;
+}
+
+subsys_initcall(msm_ext_disp_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM External Display");

+ 42 - 0
qcom/opensource/mm-drivers/sync_fence/Android.mk

@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+LOCAL_MODULE_DDK_BUILD := true
+include $(CLEAR_VARS)
+
+# This makefile is only for DLKM
+ifneq ($(findstring vendor,$(LOCAL_PATH)),)
+
+ifneq ($(findstring opensource,$(LOCAL_PATH)),)
+	SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence
+endif # opensource
+
+DLKM_DIR := $(TOP)/device/qcom/common/dlkm
+
+LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+
+###########################################################
+# This is set once per LOCAL_PATH, not per (kernel) module
+KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR)
+KBUILD_OPTIONS += MODNAME=sync_fence
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+
+###########################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := sync-fence-module-symvers
+LOCAL_MODULE_STEM         := Module.symvers
+LOCAL_MODULE_KBUILD_NAME  := Module.symvers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###########################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := sync_fence.ko
+LOCAL_MODULE_KBUILD_NAME  := sync_fence.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###########################################################
+endif # DLKM check

+ 16 - 0
qcom/opensource/mm-drivers/sync_fence/BUILD.bazel

@@ -0,0 +1,16 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+load(":define_sync_fence.bzl", "define_sync_fence")
+
+package(
+    default_visibility = [
+      "//visibility:public"
+    ],
+)
+
+ddk_headers(
+    name = "sync_fence_uapi_headers",
+    hdrs = glob(["include/uapi/sync_fence/*.h"]),
+    includes = ["include"]
+)
+
+define_sync_fence()

+ 16 - 0
qcom/opensource/mm-drivers/sync_fence/Kbuild

@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+KDIR := $(TOP)/kernel_platform/msm-kernel
+LINUXINCLUDE    += -I$(SYNC_FENCE_ROOT)sync_fence/include/
+include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf
+LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h
+
+ifdef CONFIG_QCOM_SPEC_SYNC
+obj-m += sync_fence.o
+
+sync_fence-y := src/qcom_sync_file.o
+
+CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
+endif
+EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
+		-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

+ 4 - 0
qcom/opensource/mm-drivers/sync_fence/Kconfig

@@ -0,0 +1,4 @@
+config QCOM_SPEC_SYNC
+	bool "Enable spec fence"
+	help
+	  Enable sync_fence driver

+ 15 - 0
qcom/opensource/mm-drivers/sync_fence/Makefile

@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../
+
+all: modules
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
+
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 1 - 0
qcom/opensource/mm-drivers/sync_fence/defconfig

@@ -0,0 +1 @@
+CONFIG_QCOM_SPEC_SYNC=y

+ 33 - 0
qcom/opensource/mm-drivers/sync_fence/define_sync_fence.bzl

@@ -0,0 +1,33 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module")
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+load("//msm-kernel:target_variants.bzl", "get_all_variants")
+
+def _define_module(target, variant):
+    tv = "{}_{}".format(target, variant)
+    ddk_module(
+        name = "{}_sync_fence".format(tv),
+        srcs = ["src/qcom_sync_file.c"],
+        out = "sync_fence.ko",
+        kconfig = "Kconfig",
+        defconfig = "defconfig",
+        deps = [
+            "//msm-kernel:all_headers",
+            "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers",
+        ],
+        kernel_build = "//msm-kernel:{}".format(tv),
+    )
+
+    copy_to_dist_dir(
+        name = "{}_sync_fence_dist".format(tv),
+        data = [":{}_sync_fence".format(tv)],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+        log = "info",
+    )
+
+def define_sync_fence():
+    for (t, v) in get_all_variants():
+        _define_module(t, v)

+ 6 - 0
qcom/opensource/mm-drivers/sync_fence/include/uapi/Kbuild

@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
+
+# Top-level Makefile calls into asm-$(ARCH)
+# List only non-arch directories below
+
+header-y += sync_fence/

+ 63 - 0
qcom/opensource/mm-drivers/sync_fence/include/uapi/sync_fence/qcom_sync_file.h

@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_LINUX_SPEC_SYNC_H
+#define _UAPI_LINUX_SPEC_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define SPEC_FENCE_SIGNAL_ANY 0x1
+#define SPEC_FENCE_SIGNAL_ALL 0x2
+
+/**
+ * struct fence_bind_data - data passed to the bind ioctl
+ * @out_bind_fd:	file descriptor of the speculative fence array
+ * @fds:	user pointer to an array of child fence file descriptors
+ */
+struct fence_bind_data {
+	__u32	out_bind_fd;
+	__u64	fds;
+};
+
+/**
+ * struct fence_create_data - parameters for fence array creation
+ * @num_fences:	total number of fences the array will carry
+ * @flags:	SPEC_FENCE_SIGNAL_ANY or SPEC_FENCE_SIGNAL_ALL, specifying
+ *		how the array signals
+ * @out_bind_fd:	returns the fence array fd
+ */
+struct fence_create_data {
+	__u32	num_fences;
+	__u32	flags;
+	__u32	out_bind_fd;
+};
+
+#define SPEC_SYNC_MAGIC		'>'
+
+/**
+ * DOC: SPEC_SYNC_IOC_BIND - bind child fences to a fence array
+ *
+ * Takes a struct fence_bind_data. Binds the child fences given in @fds
+ * to the fence array referred to by @out_bind_fd.
+ */
+#define SPEC_SYNC_IOC_BIND	_IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data)
+
+/**
+ * DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array
+ *
+ * Takes a struct fence_create_data. If num_fences is > 0, a fence array
+ * is created and its fd is returned in fence_create_data.out_bind_fd.
+ */
+#define SPEC_SYNC_IOC_CREATE_FENCE	_IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data)
+
+/**
+ * DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version
+ *
+ * Returns Spec driver version.
+ */
+#define SPEC_SYNC_IOC_GET_VER	_IOWR(SPEC_SYNC_MAGIC, 5, __u64)
+
+#endif /* _UAPI_LINUX_SPEC_SYNC_H */
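A minimal user-space sketch of the ioctl flow defined above, assuming the character device is exposed as /dev/spec_sync (derived from DRV_NAME in the driver below); the device path and fence fds are illustrative:

/* Hypothetical sketch, not part of this patch. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sync_fence/qcom_sync_file.h>

static int example_spec_fence(int child_fd0, int child_fd1)
{
	struct fence_create_data create = {
		.num_fences = 2,
		.flags = SPEC_FENCE_SIGNAL_ALL,	/* signal when all children do */
	};
	struct fence_bind_data bind;
	int child_fds[2] = { child_fd0, child_fd1 };
	int dev_fd, ret;

	dev_fd = open("/dev/spec_sync", O_RDWR);
	if (dev_fd < 0)
		return -1;

	/* create a speculative fence array with two dummy slots */
	ret = ioctl(dev_fd, SPEC_SYNC_IOC_CREATE_FENCE, &create);
	if (ret)
		goto out;

	/* later, bind the real fences into the array */
	bind.out_bind_fd = create.out_bind_fd;
	bind.fds = (uintptr_t)child_fds;
	ret = ioctl(dev_fd, SPEC_SYNC_IOC_BIND, &bind);
out:
	close(dev_fd);
	return ret;
}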

+ 584 - 0
qcom/opensource/mm-drivers/sync_fence/src/qcom_sync_file.c

@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/sync_file.h>
+#include <uapi/sync_fence/qcom_sync_file.h>
+#include <linux/soc/qcom/qcom_sync_file.h>
+
+#define CLASS_NAME	"sync"
+#define DRV_NAME	"spec_sync"
+#define DRV_VERSION	1
+#define NAME_LEN	32
+
+#define FENCE_MIN	1
+#define FENCE_MAX	32
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+#define MAX_DEVICE_SUPPORTED	2
+#else
+#define MAX_DEVICE_SUPPORTED	1
+#endif
+
+#define DUMMY_CONTEXT	0xfafadadafafadada
+#define DUMMY_SEQNO	0xefa9ce00efa9ce00
+
+struct dummy_spec_fence {
+	struct dma_fence fence;
+	spinlock_t lock;
+};
+
+struct sync_device {
+	/* device info */
+	struct class *dev_class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev *cdev;
+	struct mutex lock;
+	struct dummy_spec_fence *dummy_fence;
+
+	/* device drv data */
+	atomic_t device_available;
+	char name[NAME_LEN];
+	uint32_t version;
+	struct mutex l_lock;
+	struct list_head fence_array_list;
+	wait_queue_head_t wait_queue;
+};
+
+struct fence_array_node {
+	struct dma_fence_array *fence_array;
+	struct list_head list;
+};
+
+/* Speculative Sync Device Driver State */
+static struct sync_device sync_dev;
+
+static const char *spec_fence_get_name_dummy(struct dma_fence *fence)
+{
+	return "dummy_fence";
+}
+
+static const struct dma_fence_ops dummy_spec_fence_ops = {
+	.get_driver_name = spec_fence_get_name_dummy,
+	.get_timeline_name = spec_fence_get_name_dummy,
+};
+
+static bool sanitize_fence_array(struct dma_fence_array *fence)
+{
+	struct fence_array_node *node;
+	int ret = false;
+
+	mutex_lock(&sync_dev.l_lock);
+	list_for_each_entry(node, &sync_dev.fence_array_list, list) {
+		if (node->fence_array == fence) {
+			ret = true;
+			break;
+		}
+	}
+	mutex_unlock(&sync_dev.l_lock);
+
+	return ret;
+}
+
+static void clear_fence_array_tracker(bool force_clear)
+{
+	struct fence_array_node *node, *temp;
+	struct dma_fence_array *array;
+	struct dma_fence *fence;
+	bool is_signaled;
+
+	mutex_lock(&sync_dev.l_lock);
+	list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) {
+		array = node->fence_array;
+		fence = &array->base;
+		is_signaled = dma_fence_is_signaled(fence);
+
+		if (force_clear && !array->fences)
+			array->num_fences = 0;
+
+		pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled,
+			atomic_read(&array->num_pending));
+
+		if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending))
+			dma_fence_signal(fence);
+
+		if (force_clear || is_signaled) {
+			dma_fence_put(fence);
+			list_del(&node->list);
+			kfree(node);
+		}
+	}
+	mutex_unlock(&sync_dev.l_lock);
+}
+
+static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name)
+{
+	if (atomic_read(&obj->device_available) >= MAX_DEVICE_SUPPORTED) {
+		pr_err("number of device fds are limited to %d, device opened:%d\n",
+			MAX_DEVICE_SUPPORTED, atomic_read(&obj->device_available));
+		return NULL;
+	} else if (!atomic_read(&obj->device_available)) {
+		memset(obj->name, 0, NAME_LEN);
+		strscpy(obj->name, name, sizeof(obj->name));
+	}
+
+	atomic_inc(&obj->device_available);
+
+	return obj;
+}
+
+static int spec_sync_open(struct inode *inode, struct file *file)
+{
+	char task_comm[TASK_COMM_LEN];
+	struct sync_device *obj = &sync_dev;
+	int ret = 0;
+
+	if (!inode || !inode->i_cdev || !file) {
+		pr_err("NULL pointer passed\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&sync_dev.lock);
+
+	get_task_comm(task_comm, current);
+
+	obj = spec_fence_init_locked(obj, task_comm);
+	if (!obj) {
+		pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, task_comm);
+		ret = -EEXIST;
+		goto end;
+	}
+
+	file->private_data = obj;
+
+end:
+	mutex_unlock(&sync_dev.lock);
+	return ret;
+}
+
+static int spec_sync_release(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct sync_device *obj = file->private_data;
+
+	mutex_lock(&sync_dev.lock);
+
+	if (!atomic_read(&obj->device_available)) {
+		pr_err("no device to release!!\n");
+		ret = -ENODEV;
+		goto end;
+	}
+
+	atomic_dec(&obj->device_available);
+
+	if (!atomic_read(&obj->device_available))
+		clear_fence_array_tracker(true);
+
+end:
+	mutex_unlock(&sync_dev.lock);
+	return ret;
+}
+
+static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg)
+{
+	uint32_t version = obj->version;
+
+	if (copy_to_user((void __user *)arg, &version, sizeof(uint32_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int spec_sync_create_array(struct fence_create_data *f)
+{
+	int fd = get_unused_fd_flags(O_CLOEXEC);
+	struct sync_file *sync_file;
+	struct dma_fence_array *fence_array;
+	struct fence_array_node *node;
+	struct dma_fence **fences;
+	struct dummy_spec_fence *dummy_fence_p = sync_dev.dummy_fence;
+	bool signal_any;
+	int i, ret = 0;
+
+	if (fd < 0) {
+		pr_err("failed to get_unused_fd_flags\n");
+		return fd;
+	}
+
+	if (f->num_fences < FENCE_MIN || f->num_fences > FENCE_MAX) {
+		pr_err("invalid arguments num_fences:%d\n", f->num_fences);
+		ret = -ERANGE;
+		goto error_args;
+	}
+
+	fences = kmalloc_array(f->num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO);
+	if (!fences) {
+		ret = -ENOMEM;
+		goto error_args;
+	}
+
+	for (i = 0; i < f->num_fences; i++) {
+		fences[i] = &dummy_fence_p->fence;
+		/*
+		 * Take an extra reference on the dummy fence for each slot.
+		 * Any release of the fence-array while the dummy fences are
+		 * still its children decrements the dummy fence's refcount,
+		 * so these extra references keep it alive. They are dropped
+		 * implicitly when dma_fence_put(&fence_array->base) is
+		 * called, or one by one in spec_sync_bind_array() once the
+		 * real fences are bound.
+		 */
+		dma_fence_get(&dummy_fence_p->fence);
+	}
+
+	signal_any = !(f->flags & SPEC_FENCE_SIGNAL_ALL);
+
+	fence_array = dma_fence_array_create(f->num_fences, fences,
+				dma_fence_context_alloc(1), 0, signal_any);
+	if (!fence_array) {
+		/* fence-array creation failed; drop the extra dummy-fence refcounts */
+		for (i = 0; i < f->num_fences; i++)
+			dma_fence_put(&dummy_fence_p->fence);
+
+		kfree(fences);
+		ret = -EINVAL;
+		goto error_args;
+	}
+
+	/* Set the enable-signal bit so that signaling is not started during wait */
+	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags);
+	set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags);
+
+	sync_file = sync_file_create(&fence_array->base);
+	if (!sync_file) {
+		pr_err("sync_file_create fail\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		fput(sync_file->file);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	fd_install(fd, sync_file->file);
+	node->fence_array = fence_array;
+
+	mutex_lock(&sync_dev.l_lock);
+	list_add_tail(&node->list, &sync_dev.fence_array_list);
+	mutex_unlock(&sync_dev.l_lock);
+
+	pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences);
+	return fd;
+
+err:
+	dma_fence_put(&fence_array->base);
+error_args:
+	put_unused_fd(fd);
+	return ret;
+}
+
+static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg)
+{
+	struct fence_create_data f;
+	int fd;
+
+	if (copy_from_user(&f, (void __user *)arg, sizeof(f)))
+		return -EFAULT;
+
+	fd = spec_sync_create_array(&f);
+	if (fd < 0)
+		return fd;
+
+	f.out_bind_fd = fd;
+
+	if (copy_to_user((void __user *)arg, &f, sizeof(f)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms)
+{
+	int ret;
+
+	/* Check if fence-array is a speculative fence */
+	if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) {
+		pr_err("invalid fence!\n");
+		return -EINVAL;
+	} else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) {
+		/* This fence-array is already bound, just return success */
+		return 0;
+	}
+
+	/* Wait for the fence-array bind */
+	ret = wait_event_timeout(sync_dev.wait_queue,
+		test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags),
+		msecs_to_jiffies(timeout_ms));
+	if (!ret) {
+		pr_err("timed out waiting for bind fence-array %d\n", timeout_ms);
+		ret = -ETIMEDOUT;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(spec_sync_wait_bind_array);
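A minimal kernel-side consumer sketch of this export, assuming a driver that received a fence from user space and a 100 ms bind budget; example_wait_spec_fence is illustrative. SPEC_FENCE_FLAG_FENCE_ARRAY comes from linux/soc/qcom/qcom_sync_file.h, as included by this driver:

/* Hypothetical sketch, not part of this patch. */
static int example_wait_spec_fence(struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;

	if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
		array = container_of(fence, struct dma_fence_array, base);

		/* wait for user space to bind the real fences first */
		ret = spec_sync_wait_bind_array(array, 100);
		if (ret)
			return ret;
	}

	return dma_fence_wait(fence, true);
}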
+
+static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info)
+{
+	struct dma_fence_array *fence_array;
+	struct dma_fence *fence = NULL;
+	struct dma_fence *user_fence = NULL;
+	int *user_fds, ret = 0, i;
+	u32 num_fences;
+
+	fence = sync_file_get_fence(sync_bind_info->out_bind_fd);
+	if (!fence) {
+		pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd);
+		return -EINVAL;
+	}
+
+	if (dma_fence_is_signaled(fence)) {
+		pr_err("spec fence is already signaled, out_fd:%d\n",
+				sync_bind_info->out_bind_fd);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	fence_array = container_of(fence, struct dma_fence_array, base);
+	if (!sanitize_fence_array(fence_array)) {
+		pr_err("spec fence not found in the registered list out_fd:%d\n",
+				sync_bind_info->out_bind_fd);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	num_fences = fence_array->num_fences;
+
+	for (i = 0; i < num_fences; i++) {
+		if (!(fence_array->fences[i]->context == DUMMY_CONTEXT &&
+			fence_array->fences[i]->seqno == DUMMY_SEQNO)) {
+			pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n",
+				sync_bind_info->out_bind_fd, dma_fence_get_status(fence),
+				fence->flags);
+			ret = -EINVAL;
+			goto end;
+		}
+	}
+
+	user_fds = kcalloc(num_fences, sizeof(int), GFP_KERNEL);
+	if (!user_fds) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds,
+						num_fences * sizeof(int))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	spin_lock(fence->lock);
+	for (i = 0; i < num_fences; i++) {
+		user_fence = sync_file_get_fence(user_fds[i]);
+		if (!user_fence) {
+			pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n",
+				user_fds[i], sync_bind_info->out_bind_fd);
+			ret = -EINVAL;
+			goto bind_invalid;
+		}
+		fence_array->fences[i] = user_fence;
+		/*
+		 * At this point the fence-array fully contains valid fences and no more the
+		 * dummy-fence, therefore, we must release the extra refcount that the
+		 * creation of the speculative fence added to the dummy-fence.
+		 */
+		dma_fence_put(&sync_dev.dummy_fence->fence);
+		pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd,
+			 i, user_fds[i], fence_array->fences[i]->error);
+	}
+
+	clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+	spin_unlock(fence->lock);
+	dma_fence_enable_sw_signaling(&fence_array->base);
+
+	clear_fence_array_tracker(false);
+
+bind_invalid:
+	set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags);
+	wake_up_all(&sync_dev.wait_queue);
+
+	if (ret) {
+		dma_fence_set_error(fence, -EINVAL);
+		spin_unlock(fence->lock);
+		dma_fence_signal(fence);
+		clear_fence_array_tracker(false);
+	}
+out:
+	kfree(user_fds);
+end:
+	dma_fence_put(fence);
+	return ret;
+}
+
+static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg)
+{
+	struct fence_bind_data sync_bind_info;
+
+	if (copy_from_user(&sync_bind_info, (void __user *)arg, sizeof(struct fence_bind_data)))
+		return -EFAULT;
+
+	if ((int)sync_bind_info.out_bind_fd < 0) {
+		pr_err("Invalid out_fd:%d\n", sync_bind_info.out_bind_fd);
+		return -EINVAL;
+	}
+
+	return spec_sync_bind_array(&sync_bind_info);
+}
+
+static long spec_sync_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct sync_device *obj = file->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+	case SPEC_SYNC_IOC_CREATE_FENCE:
+		ret = spec_sync_ioctl_create_fence(obj, arg);
+		break;
+	case SPEC_SYNC_IOC_BIND:
+		ret = spec_sync_ioctl_bind(obj, arg);
+		break;
+	case SPEC_SYNC_IOC_GET_VER:
+		ret = spec_sync_ioctl_get_ver(obj, arg);
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+const struct file_operations spec_sync_fops = {
+	.owner = THIS_MODULE,
+	.open = spec_sync_open,
+	.release = spec_sync_release,
+	.unlocked_ioctl = spec_sync_ioctl,
+};
+
+static int spec_sync_register_device(void)
+{
+	struct dummy_spec_fence *dummy_fence_p = NULL;
+	int ret;
+
+	sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(sync_dev.dev_class)) {
+		pr_err("class_create fail\n");
+		goto res_err;
+	}
+
+	ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME);
+	if (ret) {
+		pr_err("%s: alloc_chrdev_region fail.\n", __func__);
+		goto alloc_chrdev_region_err;
+	}
+
+	sync_dev.dev = device_create(sync_dev.dev_class, NULL,
+					 sync_dev.dev_num,
+					 &sync_dev, DRV_NAME);
+	if (IS_ERR(sync_dev.dev)) {
+		pr_err("%s: device_create fail.\n", __func__);
+		goto device_create_err;
+	}
+
+	sync_dev.cdev = cdev_alloc();
+	if (sync_dev.cdev == NULL) {
+		pr_err("%s: cdev_alloc fail.\n", __func__);
+		goto cdev_alloc_err;
+	}
+	cdev_init(sync_dev.cdev, &spec_sync_fops);
+	sync_dev.cdev->owner = THIS_MODULE;
+
+	ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1);
+	if (ret) {
+		pr_err("%s: cdev_add fail.\n", __func__);
+		goto cdev_add_err;
+	}
+
+	sync_dev.version = DRV_VERSION;
+	mutex_init(&sync_dev.lock);
+	mutex_init(&sync_dev.l_lock);
+	INIT_LIST_HEAD(&sync_dev.fence_array_list);
+	init_waitqueue_head(&sync_dev.wait_queue);
+
+	dummy_fence_p = kzalloc(sizeof(struct dummy_spec_fence), GFP_KERNEL);
+	if (!dummy_fence_p) {
+		ret = -ENOMEM;
+		goto cdev_add_err;
+	}
+
+	spin_lock_init(&dummy_fence_p->lock);
+	dma_fence_init(&dummy_fence_p->fence, &dummy_spec_fence_ops, &dummy_fence_p->lock,
+		DUMMY_CONTEXT, DUMMY_SEQNO);
+	sync_dev.dummy_fence = dummy_fence_p;
+
+	return 0;
+
+cdev_add_err:
+	cdev_del(sync_dev.cdev);
+cdev_alloc_err:
+	device_destroy(sync_dev.dev_class, sync_dev.dev_num);
+device_create_err:
+	unregister_chrdev_region(sync_dev.dev_num, 1);
+alloc_chrdev_region_err:
+	class_destroy(sync_dev.dev_class);
+res_err:
+	return -ENODEV;
+}
+
+static int __init spec_sync_init(void)
+{
+	int ret = 0;
+
+	ret = spec_sync_register_device();
+	if (ret) {
+		pr_err("%s: speculative sync driver register fail.\n", __func__);
+		return ret;
+	}
+	return ret;
+}
+
+static void __exit spec_sync_deinit(void)
+{
+	cdev_del(sync_dev.cdev);
+	device_destroy(sync_dev.dev_class, sync_dev.dev_num);
+	unregister_chrdev_region(sync_dev.dev_num, 1);
+	class_destroy(sync_dev.dev_class);
+	dma_fence_put(&sync_dev.dummy_fence->fence);
+}
+
+module_init(spec_sync_init);
+module_exit(spec_sync_deinit);
+
+MODULE_DESCRIPTION("QCOM Speculative Sync Driver");
+MODULE_LICENSE("GPL v2");