
Add 'qcom/opensource/synx-kernel/' from commit '2657c18a7869feec83f4383bf72623b8d6a2ef18'

git-subtree-dir: qcom/opensource/synx-kernel
git-subtree-mainline: fe7b3b613ffabc21692c25f237ce62a9e8363d92
git-subtree-split: 2657c18a7869feec83f4383bf72623b8d6a2ef18
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/opensource/synx-kernel
tag: LA.VENDOR.14.3.0.r1-17300-lanai.QSSI15.0
David Wronek, 5 months ago
parent commit 51ff30338b
33 changed files with 12,646 additions and 0 deletions
  1. qcom/opensource/synx-kernel/Android.bp (+5 -0)
  2. qcom/opensource/synx-kernel/Android.mk (+82 -0)
  3. qcom/opensource/synx-kernel/BUILD.bazel (+27 -0)
  4. qcom/opensource/synx-kernel/Kbuild (+6 -0)
  5. qcom/opensource/synx-kernel/Makefile (+11 -0)
  6. qcom/opensource/synx-kernel/config/pineapplesynx.conf (+5 -0)
  7. qcom/opensource/synx-kernel/config/pineapplesynxconf.h (+6 -0)
  8. qcom/opensource/synx-kernel/dt-bindings/ipclite-signals.h (+18 -0)
  9. qcom/opensource/synx-kernel/include/uapi/synx/media/synx_header.h (+326 -0)
  10. qcom/opensource/synx-kernel/msm/Kbuild (+38 -0)
  11. qcom/opensource/synx-kernel/msm/Makefile (+8 -0)
  12. qcom/opensource/synx-kernel/msm/synx/ipclite.c (+1881 -0)
  13. qcom/opensource/synx-kernel/msm/synx/ipclite.h (+443 -0)
  14. qcom/opensource/synx-kernel/msm/synx/ipclite_client.h (+205 -0)
  15. qcom/opensource/synx-kernel/msm/synx/synx.c (+2946 -0)
  16. qcom/opensource/synx-kernel/msm/synx/synx_api.h (+590 -0)
  17. qcom/opensource/synx-kernel/msm/synx/synx_debugfs.c (+203 -0)
  18. qcom/opensource/synx-kernel/msm/synx/synx_debugfs.h (+144 -0)
  19. qcom/opensource/synx-kernel/msm/synx/synx_debugfs_util.c (+497 -0)
  20. qcom/opensource/synx-kernel/msm/synx/synx_debugfs_util.h (+39 -0)
  21. qcom/opensource/synx-kernel/msm/synx/synx_err.h (+27 -0)
  22. qcom/opensource/synx-kernel/msm/synx/synx_global.c (+916 -0)
  23. qcom/opensource/synx-kernel/msm/synx/synx_global.h (+305 -0)
  24. qcom/opensource/synx-kernel/msm/synx/synx_private.h (+249 -0)
  25. qcom/opensource/synx-kernel/msm/synx/synx_util.c (+1685 -0)
  26. qcom/opensource/synx-kernel/msm/synx/synx_util.h (+188 -0)
  27. qcom/opensource/synx-kernel/msm/synx/test/ipclite_test.c (+1455 -0)
  28. qcom/opensource/synx-kernel/msm/synx/test/ipclite_test.h (+118 -0)
  29. qcom/opensource/synx-kernel/pineapple.bzl (+16 -0)
  30. qcom/opensource/synx-kernel/synx_kernel_board.mk (+28 -0)
  31. qcom/opensource/synx-kernel/synx_kernel_product.mk (+18 -0)
  32. qcom/opensource/synx-kernel/synx_module_build.bzl (+128 -0)
  33. qcom/opensource/synx-kernel/synx_modules.bzl (+33 -0)

+ 5 - 0
qcom/opensource/synx-kernel/Android.bp

@@ -0,0 +1,5 @@
+cc_library_headers {
+    name: "qti_synx_kernel_headers",
+    export_include_dirs: ["include/uapi/synx/media"],
+    vendor_available: true
+}

+ 82 - 0
qcom/opensource/synx-kernel/Android.mk

@@ -0,0 +1,82 @@
+TARGET_SYNX_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
+		TARGET_SYNX_ENABLE := true
+	endif
+else
+TARGET_SYNX_ENABLE := true
+endif
+
+ifneq (,$(call is-board-platform-in-list2,volcano))
+TARGET_SYNX_ENABLE := false
+endif
+ifneq (,$(call is-board-platform-in-list2,pitti))
+TARGET_SYNX_ENABLE := false
+endif
+ifeq ($(TARGET_SYNX_ENABLE),true)
+SYNX_BLD_DIR := $(TOP)/vendor/qcom/opensource/synx-kernel
+
+
+# Build synx-driver.ko
+###########################################################
+# This is set once per LOCAL_PATH, not per (kernel) module
+KBUILD_OPTIONS := SYNX_ROOT=$(SYNX_BLD_DIR)
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+###########################################################
+
+DLKM_DIR   := $(TOP)/device/qcom/common/dlkm
+
+LOCAL_PATH := $(call my-dir)
+LOCAL_MODULE_DDK_BUILD := true
+LOCAL_MODULE_KO_DIRS := msm/synx/synx-driver.ko msm/synx/ipclite.ko msm/synx/test/ipclite_test.ko
+
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := synx-driver-symvers
+LOCAL_MODULE_KBUILD_NAME  := Module.symvers
+#LOCAL_MODULE_STEM         := Module.symvers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+# Include kp_module.ko in the /vendor/lib/modules (vendor.img)
+# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
+LOCAL_MODULE      := synx-driver.ko
+LOCAL_MODULE_KBUILD_NAME := msm/synx/synx-driver.ko
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
+LOCAL_MODULE      := ipclite.ko
+LOCAL_MODULE_KBUILD_NAME := msm/synx/ipclite.ko
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+#BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
+LOCAL_MODULE      := ipclite_test.ko
+LOCAL_MODULE_KBUILD_NAME := msm/synx/test/ipclite_test.ko
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+#BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+
+# print out variables
+$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS))
+$(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY))
+$(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES))
+$(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES))
+$(info DLKM_DIR = $(DLKM_DIR))
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+
+endif # End of check for TARGET_SYNX_ENABLE

+ 27 - 0
qcom/opensource/synx-kernel/BUILD.bazel

@@ -0,0 +1,27 @@
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+driver_header_globs = [
+    "include/uapi/synx/**/*.h",
+    "include/uapi/synx/media/**/*.h",
+    "msm/synx/**/*.h",
+]
+
+ddk_headers(
+    name = "synx_headers",
+    hdrs = glob(driver_header_globs),
+    includes = [
+        "include/uapi/synx",
+        "include/uapi/synx/media",
+        "msm/synx",
+    ],
+)
+
+load(":pineapple.bzl", "define_pineapple")
+
+define_pineapple()

+ 6 - 0
qcom/opensource/synx-kernel/Kbuild

@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+CONFIG_BUILD_VENDORSI := true
+
+# auto-detect subdirs
+obj-y +=msm/

+ 11 - 0
qcom/opensource/synx-kernel/Makefile

@@ -0,0 +1,11 @@
+KBUILD_OPTIONS+= SYNX_ROOT=$(KERNEL_SRC)/$(M)
+
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+modules_install:
+	$(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 5 - 0
qcom/opensource/synx-kernel/config/pineapplesynx.conf

@@ -0,0 +1,5 @@
+ifeq ($(CONFIG_QGKI),y)
+export TARGET_SYNX_ENABLE=y
+else
+export TARGET_SYNX_ENABLE=m
+endif

+ 6 - 0
qcom/opensource/synx-kernel/config/pineapplesynxconf.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define TARGET_SYNX_ENABLE 1
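
A minimal C sketch of how a generated platform-config header like this is typically consumed; the guard function below is hypothetical and not part of this commit:

#include "pineapplesynxconf.h"

/* Hypothetical consumer: compile synx support in or out based on the
 * platform config header above. */
#if defined(TARGET_SYNX_ENABLE) && TARGET_SYNX_ENABLE
static inline int synx_platform_enabled(void) { return 1; }
#else
static inline int synx_platform_enabled(void) { return 0; }
#endif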

+ 18 - 0
qcom/opensource/synx-kernel/dt-bindings/ipclite-signals.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_IPCLITE_SIGNALS_H
+#define __DT_BINDINGS_IPCLITE_SIGNALS_H
+
+/* Signal IDs for COMPUTE_L0 protocol */
+#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MSG			0
+#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MEM_INIT		1
+#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_VERSION		2
+#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_TEST			3
+#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_SSR			4
+#define IPCC_COMPUTE_L0_SIGNAL_IPCLITE_DEBUG		5
+
+#endif
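
These IDs are shared between device tree sources and driver code. A minimal C sketch (a hypothetical helper, loosely mirroring the per-signal handling in msm/synx/ipclite.c later in this commit) of mapping a COMPUTE_L0 signal ID to its purpose:

#include <stdint.h>
#include "ipclite-signals.h"

/* Hypothetical helper: name each COMPUTE_L0 signal, matching the
 * "msg"/"mem-init"/"version"/"test"/"ssr"/"debug" labels used by
 * ipclite_channel_irq_init() in msm/synx/ipclite.c. */
static const char *ipclite_signal_name(uint32_t signal_id)
{
	switch (signal_id) {
	case IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MSG:      return "msg";
	case IPCC_COMPUTE_L0_SIGNAL_IPCLITE_MEM_INIT: return "mem-init";
	case IPCC_COMPUTE_L0_SIGNAL_IPCLITE_VERSION:  return "version";
	case IPCC_COMPUTE_L0_SIGNAL_IPCLITE_TEST:     return "test";
	case IPCC_COMPUTE_L0_SIGNAL_IPCLITE_SSR:      return "ssr";
	case IPCC_COMPUTE_L0_SIGNAL_IPCLITE_DEBUG:    return "debug";
	default:                                      return "unknown";
	}
}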

+ 326 - 0
qcom/opensource/synx-kernel/include/uapi/synx/media/synx_header.h

@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __UAPI_SYNX_H__
+#define __UAPI_SYNX_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Size of opaque payload sent to kernel for safekeeping until signal time */
+#define SYNX_USER_PAYLOAD_SIZE               4
+
+#define SYNX_MAX_WAITING_SYNX                16
+
+#define SYNX_CALLBACK_RESULT_SUCCESS         2
+#define SYNX_CALLBACK_RESULT_FAILED          3
+#define SYNX_CALLBACK_RESULT_CANCELED        4
+
+/**
+ * struct synx_info - Sync object creation information
+ *
+ * @name     : Optional string representation of the synx object
+ * @synx_obj : Sync object returned after creation in kernel
+ */
+struct synx_info {
+	char name[64];
+	__s32 synx_obj;
+};
+
+/**
+ * struct synx_userpayload_info - Payload info from user space
+ *
+ * @synx_obj:   Sync object for which payload has to be registered for
+ * @reserved:   Reserved
+ * @payload:    Pointer to user payload
+ */
+struct synx_userpayload_info {
+	__s32 synx_obj;
+	__u32 reserved;
+	__u64 payload[SYNX_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct synx_signal - Sync object signaling struct
+ *
+ * @synx_obj   : Sync object to be signaled
+ * @synx_state : State of the synx object to which it should be signaled
+ */
+struct synx_signal {
+	__s32 synx_obj;
+	__u32 synx_state;
+};
+
+/**
+ * struct synx_merge - Merge information for synx objects
+ *
+ * @synx_objs :  Pointer to synx object array to merge
+ * @num_objs  :  Number of objects in the array
+ * @merged    :  Merged synx object
+ */
+struct synx_merge {
+	__u64 synx_objs;
+	__u32 num_objs;
+	__s32 merged;
+};
+
+/**
+ * struct synx_wait - Sync object wait information
+ *
+ * @synx_obj   : Sync object to wait on
+ * @reserved   : Reserved
+ * @timeout_ms : Timeout in milliseconds
+ */
+struct synx_wait {
+	__s32 synx_obj;
+	__u32 reserved;
+	__u64 timeout_ms;
+};
+
+/**
+ * struct synx_external_desc - info of external sync object
+ *
+ * @type     : Synx type
+ * @reserved : Reserved
+ * @id       : Sync object id
+ *
+ */
+struct synx_external_desc {
+	__u32 type;
+	__u32 reserved;
+	__s32 id[2];
+};
+
+/**
+ * struct synx_bind - info for binding two synx objects
+ *
+ * @synx_obj      : Synx object
+ * @reserved      : Reserved
+ * @ext_sync_desc : External synx to bind to
+ *
+ */
+struct synx_bind {
+	__s32 synx_obj;
+	__u32 reserved;
+	struct synx_external_desc ext_sync_desc;
+};
+
+/**
+ * struct synx_addrefcount - info for refcount increment
+ *
+ * @synx_obj : Synx object
+ * @count    : Count to increment
+ *
+ */
+struct synx_addrefcount {
+	__s32 synx_obj;
+	__u32 count;
+};
+
+/**
+ * struct synx_id_info - info for import and export of a synx object
+ *
+ * @synx_obj     : Synx object to be exported
+ * @secure_key   : Secure key created in export and used in import
+ * @new_synx_obj : Synx object created in import
+ * @padding      : Structure padding
+ *
+ */
+struct synx_id_info {
+	__s32 synx_obj;
+	__u32 secure_key;
+	__s32 new_synx_obj;
+	__u32 padding;
+};
+
+/**
+ * struct synx_fence_desc - info of external fence object
+ *
+ * @type     : Fence type
+ * @reserved : Reserved
+ * @id       : Fence object id
+ *
+ */
+struct synx_fence_desc {
+	__u32 type;
+	__u32 reserved;
+	__s32 id[2];
+};
+
+/**
+ * struct synx_create_v2 - Sync object creation information
+ *
+ * @name     : Optional string representation of the synx object
+ * @synx_obj : Synx object allocated
+ * @flags    : Create flags
+ * @desc     : External fence desc
+ */
+struct synx_create_v2 {
+	char name[64];
+	__u32 synx_obj;
+	__u32 flags;
+	struct synx_fence_desc desc;
+};
+
+/**
+ * struct synx_userpayload_info_v2 - Payload info from user space
+ *
+ * @synx_obj  : Sync object for which payload has to be registered for
+ * @reserved  : Reserved
+ * @payload   : Pointer to user payload
+ */
+struct synx_userpayload_info_v2 {
+	__u32 synx_obj;
+	__u32 reserved;
+	__u64 payload[SYNX_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct synx_signal_v2 - Sync object signaling struct
+ *
+ * @synx_obj   : Sync object to be signaled
+ * @synx_state : State of the synx object to which it should be signaled
+ * @reserved   : Reserved
+ */
+struct synx_signal_v2 {
+	__u32 synx_obj;
+	__u32 synx_state;
+	__u64 reserved;
+};
+
+/**
+ * struct synx_merge_v2 - Merge information for synx objects
+ *
+ * @synx_objs :  Pointer to synx object array to merge
+ * @num_objs  :  Number of objects in the array
+ * @merged    :  Merged synx object
+ * @flags     :  Merge flags
+ * @reserved  :  Reserved
+ */
+struct synx_merge_v2 {
+	__u64 synx_objs;
+	__u32 num_objs;
+	__u32 merged;
+	__u32 flags;
+	__u32 reserved;
+};
+
+/**
+ * struct synx_wait_v2 - Sync object wait information
+ *
+ * @synx_obj   : Sync object to wait on
+ * @reserved   : Reserved
+ * @timeout_ms : Timeout in milliseconds
+ */
+struct synx_wait_v2 {
+	__u32 synx_obj;
+	__u32 reserved;
+	__u64 timeout_ms;
+};
+
+/**
+ * struct synx_external_desc_v2 - info of external sync object
+ *
+ * @type     : Synx type
+ * @reserved : Reserved
+ * @id       : Sync object id
+ *
+ */
+struct synx_external_desc_v2 {
+	__u64 id;
+	__u32 type;
+	__u32 reserved;
+};
+
+/**
+ * struct synx_bind_v2 - info for binding two synx objects
+ *
+ * @synx_obj      : Synx object
+ * @reserved      : Reserved
+ * @ext_sync_desc : External synx to bind to
+ *
+ */
+struct synx_bind_v2 {
+	__u32 synx_obj;
+	__u32 reserved;
+	struct synx_external_desc_v2 ext_sync_desc;
+};
+
+/**
+ * struct synx_import_info - import info
+ *
+ * @synx_obj     : Synx handle to be imported
+ * @flags        : Import flags
+ * @new_synx_obj : Synx object created in import
+ * @reserved     : Reserved
+ * @desc         : External fence descriptor
+ */
+struct synx_import_info {
+	__u32 synx_obj;
+	__u32 flags;
+	__u32 new_synx_obj;
+	__u32 reserved;
+	struct synx_fence_desc desc;
+};
+
+/**
+ * struct synx_import_arr_info - import list info
+ *
+ * @list     : List of synx_import_info
+ * @num_objs : No of fences to import
+ */
+struct synx_import_arr_info {
+	__u64 list;
+	__u32 num_objs;
+};
+
+/**
+ * struct synx_fence_fd - get fd for synx fence
+ *
+ * @synx_obj : Synx handle
+ * @fd       : fd for synx handle fence
+ */
+struct synx_fence_fd {
+	__u32 synx_obj;
+	__s32 fd;
+};
+
+/**
+ * struct synx_private_ioctl_arg - Sync driver ioctl argument
+ *
+ * @id        : IOCTL command id
+ * @size      : Size of command payload
+ * @result    : Result of command execution
+ * @reserved  : Reserved
+ * @ioctl_ptr : Pointer to user data
+ */
+struct synx_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__u64 ioctl_ptr;
+};
+
+#define SYNX_PRIVATE_MAGIC_NUM 's'
+
+#define SYNX_PRIVATE_IOCTL_CMD \
+	_IOWR(SYNX_PRIVATE_MAGIC_NUM, 130, struct synx_private_ioctl_arg)
+
+#define SYNX_CREATE                          0
+#define SYNX_RELEASE                         1
+#define SYNX_SIGNAL                          2
+#define SYNX_MERGE                           3
+#define SYNX_REGISTER_PAYLOAD                4
+#define SYNX_DEREGISTER_PAYLOAD              5
+#define SYNX_WAIT                            6
+#define SYNX_BIND                            7
+#define SYNX_ADDREFCOUNT                     8
+#define SYNX_GETSTATUS                       9
+#define SYNX_IMPORT                          10
+#define SYNX_EXPORT                          11
+#define SYNX_IMPORT_ARR                      12
+#define SYNX_GETFENCE_FD                     13
+
+#endif /* __UAPI_SYNX_H__ */
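
A minimal userspace sketch of driving this ioctl interface. Only the structs, command IDs, and SYNX_PRIVATE_IOCTL_CMD come from the header above; the device node path and surrounding code are assumptions for illustration:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "synx_header.h"

int main(void)
{
	struct synx_create_v2 create;
	struct synx_private_ioctl_arg arg;
	/* "/dev/synx" is a hypothetical node name; the actual device is
	 * created by the driver, not defined in this UAPI header. */
	int fd = open("/dev/synx", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	strncpy(create.name, "example-fence", sizeof(create.name) - 1);

	memset(&arg, 0, sizeof(arg));
	arg.id = SYNX_CREATE;                     /* command ID from above */
	arg.size = sizeof(create);                /* payload size */
	arg.ioctl_ptr = (uint64_t)(uintptr_t)&create;

	if (ioctl(fd, SYNX_PRIVATE_IOCTL_CMD, &arg) == 0)
		printf("created synx handle 0x%x\n", create.synx_obj);

	close(fd);
	return 0;
}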

+ 38 - 0
qcom/opensource/synx-kernel/msm/Kbuild

@@ -0,0 +1,38 @@
+LINUXINCLUDE    += -I$(SYNX_ROOT)/include \
+                   -I$(SYNX_ROOT)/include/uapi \
+				   -I$(SYNX_ROOT)/include/uapi/synx/media
+
+ccflags-y += -I$(SYNX_ROOT)/msm/synx/
+
+# Add flag to compile the actual mmrm implementation instead of the stub version.
+# To follow up with the mmrm team on whether techpack users need to define this long term.
+#KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM
+
+# ported from Android.mk
+$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS))
+
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO))
+# include $(SYNX_ROOT)/config/waipio.mk
+KBUILD_CPPFLAGS += -DCONFIG_SYNX_WAIPIO=1
+ccflags-y += -DCONFIG_SYNX_WAIPIO=1
+endif
+
+ifeq ($(CONFIG_ARCH_KALAMA), y)
+$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA))
+# include $(SYNX_ROOT)/config/waipio.mk
+KBUILD_CPPFLAGS += -DCONFIG_SYNX_KALAMA=1
+ccflags-y += -DCONFIG_SYNX_KALAMA=1
+endif
+
+ifeq ($(CONFIG_ARCH_PINEAPPLE), y)
+$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE))
+# include $(SYNX_ROOT)/config/pineapple.mk
+KBUILD_CPPFLAGS += -DCONFIG_SYNX_PINEAPPLE=1
+ccflags-y += -DCONFIG_SYNX_PINEAPPLE=1
+endif
+
+obj-m += synx-driver.o
+obj-m += synx/ipclite.o
+obj-m += synx/test/ipclite_test.o
+synx-driver-objs := synx/synx.o synx/synx_global.o synx/synx_util.o synx/synx_debugfs.o

+ 8 - 0
qcom/opensource/synx-kernel/msm/Makefile

@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+CONFIG_DEBUGFS_SYNX = false
+
+obj-m += synx/ipclite.o
+obj-m += synx-driver.o
+synx-driver-objs := synx/synx.o synx/synx_util.o synx/synx_debugfs.o synx/synx_global.o
+synx-driver-$(CONFIG_DEBUGFS_SYNX) += synx/synx_debugfs_util.o

+ 1881 - 0
qcom/opensource/synx-kernel/msm/synx/ipclite.c

@@ -0,0 +1,1881 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/memory.h>
+#include <linux/sizes.h>
+
+#include <linux/hwspinlock.h>
+
+#include <linux/sysfs.h>
+
+#include "ipclite_client.h"
+#include "ipclite.h"
+
+static struct ipclite_info *ipclite;
+static struct ipclite_client synx_client;
+static struct ipclite_client test_client;
+static struct ipclite_debug_info *ipclite_dbg_info;
+static struct ipclite_debug_struct *ipclite_dbg_struct;
+static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem;
+static struct mutex ssr_mutex;
+static struct kobject *sysfs_kobj;
+
+static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO;
+static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump;
+static uint32_t enabled_hosts, partitions, major_ver, minor_ver;
+static uint64_t feature_mask;
+
+static inline bool is_host_enabled(uint32_t host)
+{
+	return (1U & (enabled_hosts >> host));
+}
+
+static inline bool is_loopback_except_apps(uint32_t h0, uint32_t h1)
+{
+	return (h0 == h1 && h0 != IPCMEM_APPS);
+}
+
+static void ipclite_inmem_log(const char *psztStr, ...)
+{
+	uint32_t local_index = 0;
+	va_list pArgs;
+
+	va_start(pArgs, psztStr);
+
+	/* Incrementing the index atomically and storing the index in local variable */
+	local_index = ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
+							&ipclite_dbg_info->debug_log_index);
+	local_index %= IPCLITE_LOG_BUF_SIZE;
+
+	/* Writes data on the index location */
+	vsnprintf(ipclite_dbg_inmem->IPCLITELog[local_index], IPCLITE_LOG_MSG_SIZE, psztStr, pArgs);
+
+	va_end(pArgs);
+}
+
+static void ipclite_dump_debug_struct(void)
+{
+	int i = 0, host = 0;
+	struct ipclite_debug_struct *temp_dbg_struct;
+
+	/* Check if debug structures are initialized */
+	if (!ipclite_dbg_info || !ipclite_dbg_struct) {
+		pr_err("Debug Structures not initialized\n");
+		return;
+	}
+
+	/* Check if debug structures are enabled before printing */
+	if (!(IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT))) {
+		pr_err("Debug Structures not enabled\n");
+		return;
+	}
+
+	/* Dumping the debug structures */
+	pr_info("------------------- Dumping IPCLite Debug Structure -------------------\n");
+
+	for (host = 0; host < IPCMEM_NUM_HOSTS; host++) {
+		if (!is_host_enabled(host))
+			continue;
+		temp_dbg_struct = (struct ipclite_debug_struct *)
+					(((char *)ipclite_dbg_struct) +
+					(sizeof(*temp_dbg_struct) * host));
+
+		pr_info("---------- Host ID: %d dbg_mem:%p ----------\n",
+				host, temp_dbg_struct);
+		pr_info("Total Signals Sent : %d Total Signals Received : %d\n",
+				temp_dbg_struct->dbg_info_overall.total_numsig_sent,
+				temp_dbg_struct->dbg_info_overall.total_numsig_recv);
+		pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n",
+				temp_dbg_struct->dbg_info_overall.last_sent_host_id,
+				temp_dbg_struct->dbg_info_overall.last_recv_host_id);
+		pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n",
+				temp_dbg_struct->dbg_info_overall.last_sigid_sent,
+				temp_dbg_struct->dbg_info_overall.last_sigid_recv);
+
+		for (i = 0; i < IPCMEM_NUM_HOSTS; i++) {
+			if (!is_host_enabled(i))
+				continue;
+			pr_info("----------> Host ID : %d Host ID : %d\n", host, i);
+			pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n",
+			temp_dbg_struct->dbg_info_host[i].numsig_sent,
+			temp_dbg_struct->dbg_info_host[i].numsig_recv);
+			pr_info("No. of Interrupts Received : %d\n",
+			temp_dbg_struct->dbg_info_host[i].num_intr);
+			pr_info("TX Write Index : %d TX Read Index : %d\n",
+			temp_dbg_struct->dbg_info_host[i].tx_wr_index,
+			temp_dbg_struct->dbg_info_host[i].tx_rd_index);
+			pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0],
+			temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]);
+			pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1],
+			temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]);
+			pr_info("RX Write Index : %d RX Read Index : %d\n",
+			temp_dbg_struct->dbg_info_host[i].rx_wr_index,
+			temp_dbg_struct->dbg_info_host[i].rx_rd_index);
+			pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0],
+			temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]);
+			pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1],
+			temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]);
+		}
+	}
+	return;
+}
+
+static void ipclite_dump_inmem_logs(void)
+{
+	int i = 0;
+	uint32_t local_index = 0;
+
+	/* Check if debug and inmem structures are initialized */
+	if (!ipclite_dbg_info || !ipclite_dbg_inmem) {
+		pr_err("Debug structures not initialized\n");
+		return;
+	}
+
+	/* Check if debug structures are enabled before printing */
+	if (!(IS_DEBUG_CONFIG(IPCLITE_INMEM_LOG))) {
+		pr_err("In-Memory Logs not enabled\n");
+		return;
+	}
+
+	/* Dumping the debug in-memory logs */
+	pr_info("------------------- Dumping In-Memory Logs -------------------\n");
+
+	/* Storing the index atomically in local variable */
+	local_index = ipclite_global_atomic_load_u32((ipclite_atomic_uint32_t *)
+							&ipclite_dbg_info->debug_log_index);
+
+	/* Printing from current index till the end of buffer */
+	for (i = local_index % IPCLITE_LOG_BUF_SIZE; i < IPCLITE_LOG_BUF_SIZE; i++) {
+		if (ipclite_dbg_inmem->IPCLITELog[i][0])
+			pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]);
+	}
+
+	/* Printing from 0th index to current-1 index */
+	for (i = 0; i < local_index % IPCLITE_LOG_BUF_SIZE; i++) {
+		if (ipclite_dbg_inmem->IPCLITELog[i][0])
+			pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]);
+	}
+
+	return;
+}
+
+int ipclite_hw_mutex_acquire(void)
+{
+	int ret;
+
+	if (unlikely(!ipclite)) {
+		pr_err("IPCLite not initialized\n");
+		return -ENOMEM;
+	}
+	ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
+					HWSPINLOCK_TIMEOUT, &ipclite->hw_mutex_flags);
+	if (ret) {
+		pr_err("Hw mutex lock acquire failed\n");
+		return ret;
+	}
+	ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS;
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_hw_mutex_acquire);
+
+int ipclite_hw_mutex_release(void)
+{
+	if (unlikely(!ipclite)) {
+		pr_err("IPCLite not initialized\n");
+		return -ENOMEM;
+	}
+	if (ipclite->ipcmem.toc_data.host_info->hwlock_owner != IPCMEM_APPS)
+		return -EINVAL;
+
+	ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
+	hwspin_unlock_irqrestore(ipclite->hwlock, &ipclite->hw_mutex_flags);
+	return 0;
+}
+EXPORT_SYMBOL(ipclite_hw_mutex_release);
+
+/* Atomic Functions Start */
+void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
+{
+	BUG_ON(addr == NULL);
+
+	atomic_set(addr, data);
+}
+EXPORT_SYMBOL(ipclite_atomic_init_u32);
+
+void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data)
+{
+	BUG_ON(addr == NULL);
+
+	atomic_set(addr, data);
+}
+EXPORT_SYMBOL(ipclite_atomic_init_i32);
+
+void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
+{
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	atomic_set(addr, data);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_store_u32);
+
+void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data)
+{
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	atomic_set(addr, data);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_store_i32);
+
+uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr)
+{
+	uint32_t ret = 0;
+
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	ret = atomic_read(addr);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_load_u32);
+
+int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr)
+{
+	int32_t ret = 0;
+
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	ret = atomic_read(addr);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_load_i32);
+
+uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
+{
+	uint32_t ret = 0, mask = (1 << nr);
+
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	ret = atomic_fetch_or(mask, addr);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_test_and_set_bit);
+
+uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
+{
+	uint32_t ret = 0, mask = (1 << nr);
+
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	ret = atomic_fetch_and(~mask, addr);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_test_and_clear_bit);
+
+int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr)
+{
+	int32_t ret = 0;
+
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	ret = atomic_fetch_add(1, addr);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_inc);
+
+int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr)
+{
+	int32_t ret = 0;
+
+	BUG_ON(addr == NULL);
+
+	ATOMIC_HW_MUTEX_ACQUIRE;
+
+	ret = atomic_fetch_sub(1, addr);
+
+	ATOMIC_HW_MUTEX_RELEASE;
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_dec);
+/* Atomic Functions End */
+
+static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo)
+{
+	size_t len = 0;
+	u32 head = 0, tail = 0;
+
+	head = le32_to_cpu(*rx_fifo->head);
+	tail = le32_to_cpu(*rx_fifo->tail);
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "head=%d, tail=%d\n", head, tail);
+
+	if (head < tail)
+		len = rx_fifo->length - tail + head;
+	else
+		len = head - tail;
+
+	if (WARN_ON_ONCE(len > rx_fifo->length))
+		len = 0;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "len=%zu\n", len);
+
+	return len;
+}
+
+static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo,
+			       void *data, size_t count)
+{
+	size_t len = 0;
+	u32 tail = 0;
+
+	tail = le32_to_cpu(*rx_fifo->tail);
+
+	if (WARN_ON_ONCE(tail > rx_fifo->length))
+		return;
+
+	if (tail >= rx_fifo->length)
+		tail -= rx_fifo->length;
+
+	len = min_t(size_t, count, rx_fifo->length - tail);
+	if (len)
+		memcpy_fromio(data, rx_fifo->fifo + tail, len);
+
+	if (len != count)
+		memcpy_fromio(data + len, rx_fifo->fifo, (count - len));
+}
+
+static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo,
+				  size_t count, uint32_t core_id)
+{
+	u32 tail = 0;
+
+	tail = le32_to_cpu(*rx_fifo->tail);
+
+	tail += count;
+	if (tail >= rx_fifo->length)
+		tail %= rx_fifo->length;
+
+	*rx_fifo->tail = cpu_to_le32(tail);
+
+	/* Storing the debug data in debug structures */
+	if (IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT)) {
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[1] =
+				ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0];
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0] =
+				ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index;
+		ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index = *rx_fifo->head;
+
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[1] =
+				ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0];
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0] =
+				ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index;
+		ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index = *rx_fifo->tail;
+
+		ipclite_dbg_struct->dbg_info_overall.total_numsig_recv++;
+		ipclite_dbg_struct->dbg_info_host[core_id].numsig_recv++;
+	}
+}
+
+static size_t ipcmem_tx_avail(struct ipclite_fifo *tx_fifo)
+{
+	u32 head = 0, tail = 0, avail = 0;
+
+	head = le32_to_cpu(*tx_fifo->head);
+	tail = le32_to_cpu(*tx_fifo->tail);
+
+	if (tail <= head)
+		avail = tx_fifo->length - head + tail;
+	else
+		avail = tail - head;
+
+	if (avail < FIFO_FULL_RESERVE)
+		avail = 0;
+	else
+		avail -= FIFO_FULL_RESERVE;
+
+	if (WARN_ON_ONCE(avail > tx_fifo->length))
+		avail = 0;
+
+	return avail;
+}
+
+static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo,
+					    unsigned int head,
+					    const void *data, size_t count)
+{
+	size_t len = 0;
+
+	if (WARN_ON_ONCE(head > tx_fifo->length))
+		return head;
+
+	len = min_t(size_t, count, tx_fifo->length - head);
+	if (len)
+		memcpy(tx_fifo->fifo + head, data, len);
+
+	if (len != count)
+		memcpy(tx_fifo->fifo, data + len, count - len);
+
+	head += count;
+	if (head >= tx_fifo->length)
+		head -= tx_fifo->length;
+
+	return head;
+}
+
+static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo,
+			const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id)
+{
+	unsigned int head = 0;
+
+	head = le32_to_cpu(*tx_fifo->head);
+	head = ipcmem_tx_write_one(tx_fifo, head, data, dlen);
+
+	head = ALIGN(head, 8);
+	if (head >= tx_fifo->length)
+		head -= tx_fifo->length;
+
+	/* Ensure ordering of fifo and head update */
+	wmb();
+
+	*tx_fifo->head = cpu_to_le32(head);
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "head : %d core_id : %d signal_id : %d\n",
+						*tx_fifo->head, core_id, signal_id);
+
+	/* Storing the debug data in debug structures */
+	if (IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT)) {
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[1] =
+				ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0];
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0] =
+				ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index;
+		ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index = *tx_fifo->head;
+
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[1] =
+				ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0];
+		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0] =
+				ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index;
+		ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index = *tx_fifo->tail;
+
+		ipclite_dbg_struct->dbg_info_overall.total_numsig_sent++;
+		ipclite_dbg_struct->dbg_info_host[core_id].numsig_sent++;
+		ipclite_dbg_struct->dbg_info_overall.last_sent_host_id = core_id;
+		ipclite_dbg_struct->dbg_info_overall.last_sigid_sent = signal_id;
+	}
+}
+
+static size_t ipclite_rx_avail(struct ipclite_channel *channel)
+{
+	return channel->rx_fifo->avail(channel->rx_fifo);
+}
+
+static void ipclite_rx_peak(struct ipclite_channel *channel,
+			       void *data, size_t count)
+{
+	channel->rx_fifo->peak(channel->rx_fifo, data, count);
+}
+
+static void ipclite_rx_advance(struct ipclite_channel *channel,
+					size_t count)
+{
+	channel->rx_fifo->advance(channel->rx_fifo, count, channel->remote_pid);
+}
+
+static size_t ipclite_tx_avail(struct ipclite_channel *channel)
+{
+	return channel->tx_fifo->avail(channel->tx_fifo);
+}
+
+static void ipclite_tx_write(struct ipclite_channel *channel,
+				const void *data, size_t dlen)
+{
+	channel->tx_fifo->write(channel->tx_fifo, data, dlen, channel->remote_pid,
+								channel->irq_info->signal_id);
+}
+
+static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail)
+{
+	int ret = 0;
+	uint64_t data = 0;
+
+	if (avail < sizeof(data)) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n",
+						channel->remote_pid, channel->irq_info->signal_id);
+		return -EAGAIN;
+	}
+
+	ipclite_rx_peak(channel, &data, sizeof(data));
+
+	if (synx_client.reg_complete == 1) {
+		if (synx_client.callback)
+			synx_client.callback(channel->remote_pid, data,
+								synx_client.priv_data);
+	}
+	ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
+	return ret;
+}
+
+static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail)
+{
+	int ret = 0;
+	uint64_t data = 0;
+
+	if (avail < sizeof(data)) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n",
+						channel->remote_pid, channel->irq_info->signal_id);
+		return -EAGAIN;
+	}
+
+	ipclite_rx_peak(channel, &data, sizeof(data));
+
+	if (test_client.reg_complete == 1) {
+		if (test_client.callback)
+			test_client.callback(channel->remote_pid, data,
+								test_client.priv_data);
+	}
+	ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
+	return ret;
+}
+
+static irqreturn_t ipclite_intr(int irq, void *data)
+{
+	int ret = 0;
+	unsigned int avail = 0;
+	uint64_t msg = 0;
+	struct ipclite_channel *channel;
+	struct ipclite_irq_info *irq_info;
+
+	irq_info = (struct ipclite_irq_info *)data;
+	channel = container_of(irq_info, struct ipclite_channel, irq_info[irq_info->signal_id]);
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt received from Core : %d Signal : %d\n",
+							channel->remote_pid, irq_info->signal_id);
+
+	/* Storing the debug data in debug structures */
+	if (IS_DEBUG_CONFIG(IPCLITE_DBG_STRUCT)) {
+		ipclite_dbg_struct->dbg_info_host[channel->remote_pid].num_intr++;
+		ipclite_dbg_struct->dbg_info_overall.last_recv_host_id = channel->remote_pid;
+		ipclite_dbg_struct->dbg_info_overall.last_sigid_recv = irq_info->signal_id;
+	}
+
+	if (irq_info->signal_id == IPCLITE_MSG_SIGNAL) {
+		for (;;) {
+			avail = ipclite_rx_avail(channel);
+			if (avail < sizeof(msg))
+				break;
+
+			ret = ipclite_rx_data(channel, avail);
+		}
+		IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n");
+	} else if (irq_info->signal_id == IPCLITE_VERSION_SIGNAL) {
+		IPCLITE_OS_LOG(IPCLITE_DBG, "Versioning is not enabled using IPCC signals\n");
+	} else if (irq_info->signal_id == IPCLITE_TEST_SIGNAL) {
+		for (;;) {
+			avail = ipclite_rx_avail(channel);
+			if (avail < sizeof(msg))
+				break;
+
+			ret = ipclite_rx_test_data(channel, avail);
+		}
+		IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n");
+	} else {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Wrong Interrupt Signal from core : %d signal : %d\n",
+							channel->remote_pid, irq_info->signal_id);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int ipclite_tx(struct ipclite_channel *channel,
+			uint64_t data, size_t dlen, uint32_t ipclite_signal)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	if (channel->status != ACTIVE) {
+		if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
+			channel->status = ACTIVE;
+		} else {
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Channel not active\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	spin_lock_irqsave(&channel->tx_lock, flags);
+	if (ipclite_tx_avail(channel) < dlen) {
+		spin_unlock_irqrestore(&channel->tx_lock, flags);
+		ret = -EAGAIN;
+		return ret;
+	}
+
+	ipclite_tx_write(channel, &data, dlen);
+
+	mbox_send_message(channel->irq_info[ipclite_signal].mbox_chan, NULL);
+	mbox_client_txdone(channel->irq_info[ipclite_signal].mbox_chan, 0);
+
+	spin_unlock_irqrestore(&channel->tx_lock, flags);
+
+	return ret;
+}
+
+static int ipclite_notify_core(int32_t proc_id, int32_t signal_id)
+{
+	int ret = 0;
+	struct ipclite_channel *channel;
+
+	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
+		return -EINVAL;
+	}
+	channel = &ipclite->channel[proc_id];
+
+	if (channel->status != ACTIVE) {
+		if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
+			channel->status = ACTIVE;
+		} else {
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
+			return -EOPNOTSUPP;
+		}
+	}
+
+	ret = mbox_send_message(channel->irq_info[signal_id].mbox_chan, NULL);
+	if (ret < 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR,
+				"Signal sending failed to Core : %d Signal : %d ret : %d\n",
+									proc_id, signal_id, ret);
+		return ret;
+	}
+
+	IPCLITE_OS_LOG(IPCLITE_DBG,
+			"Signal send completed to core : %d signal : %d ret : %d\n",
+									proc_id, signal_id, ret);
+	return 0;
+}
+
+static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
+{
+	int ret = 0;
+	struct device *dev;
+	struct device_node *np;
+	struct resource r;
+
+	dev = ipclite->dev;
+
+	np = of_parse_phandle(dev->of_node, name, 0);
+	if (!np) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "No %s specified\n", name);
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	ipclite->ipcmem.mem.aux_base = (u64)r.start;
+	ipclite->ipcmem.mem.size = resource_size(&r);
+	ipclite->ipcmem.mem.virt_base = devm_ioremap_wc(dev, r.start,
+					resource_size(&r));
+	if (!ipclite->ipcmem.mem.virt_base)
+		return -ENOMEM;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "aux_base = %lx, size=%d,virt_base=%p\n",
+			ipclite->ipcmem.mem.aux_base, ipclite->ipcmem.mem.size,
+			ipclite->ipcmem.mem.virt_base);
+
+	return ret;
+}
+
+/**
+ * insert_magic_number() - Inserts the magic number in toc header
+ *
+ * Function computes a simple checksum of the contents in toc header
+ * and stores the result in magic_number field in the toc header
+ */
+static void insert_magic_number(void)
+{
+	uint32_t *block = ipclite->ipcmem.mem.virt_base;
+	size_t size = sizeof(struct ipcmem_toc_header) / sizeof(uint32_t);
+
+	for (int i = 1; i < size; i++)
+		block[0] ^= block[i];
+
+	block[0] = ~block[0];
+}
+
+static int32_t setup_toc(struct ipclite_mem *ipcmem)
+{
+	size_t offset = 0;
+	void *virt_base = ipcmem->mem.virt_base;
+	struct ipcmem_offsets *offsets = &ipcmem->toc->offsets;
+	struct ipcmem_toc_data *toc_data = &ipcmem->toc_data;
+
+	/* Setup Offsets */
+	offsets->host_info		= offset += IPCMEM_TOC_VAR_OFFSET;
+	offsets->global_entry		= offset += sizeof(struct ipcmem_host_info);
+	offsets->partition_info		= offset += sizeof(struct ipcmem_partition_entry);
+	offsets->partition_entry	= offset += sizeof(struct ipcmem_partition_info);
+	// offsets->debug		= virt_base + size - 64K;
+	/* Offset to be used for any new structure added in toc (after partition_entry) */
+	// offsets->new_struct	= offset += sizeof(struct ipcmem_partition_entry)*IPCMEM_NUM_HOSTS;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "toc_data offsets:");
+	IPCLITE_OS_LOG(IPCLITE_DBG, "host_info = 0x%X", offsets->host_info);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "global_entry = 0x%X", offsets->global_entry);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_info = 0x%X", offsets->partition_info);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_entry = 0x%X", offsets->partition_entry);
+
+	/* Point structures to the appropriate offset in TOC */
+	toc_data->host_info		= ADD_OFFSET(virt_base, offsets->host_info);
+	toc_data->global_entry		= ADD_OFFSET(virt_base, offsets->global_entry);
+	toc_data->partition_info	= ADD_OFFSET(virt_base, offsets->partition_info);
+	toc_data->partition_entry	= ADD_OFFSET(virt_base, offsets->partition_entry);
+
+	return 0;
+}
+
+static void setup_global_partition(struct ipclite_mem *ipcmem, uint32_t base_offset)
+{
+	/*Fill in global partition details*/
+	ipcmem->toc_data.global_entry->base_offset = base_offset;
+	ipcmem->toc_data.global_entry->size = GLOBAL_PARTITION_SIZE;
+	ipcmem->toc_data.global_entry->flags = GLOBAL_PARTITION_FLAGS;
+	ipcmem->toc_data.global_entry->host0 = IPCMEM_GLOBAL_HOST;
+	ipcmem->toc_data.global_entry->host1 = IPCMEM_GLOBAL_HOST;
+
+	ipcmem->global_partition = ADD_OFFSET(ipcmem->mem.virt_base, base_offset);
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "base_offset =%x,ipcmem->global_partition = %p\n",
+				base_offset,
+				ipcmem->global_partition);
+
+	ipcmem->global_partition->hdr = global_partition_hdr;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
+				ipcmem->global_partition->hdr.partition_type,
+				ipcmem->global_partition->hdr.region_offset,
+				ipcmem->global_partition->hdr.region_size);
+}
+
+static void update_partition(struct ipclite_mem *ipcmem, uint32_t p)
+{
+	int host0 = ipcmem->toc_data.partition_entry[p].host0;
+	int host1 = ipcmem->toc_data.partition_entry[p].host1;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1);
+
+	ipcmem->partition[p] = ADD_OFFSET(ipcmem->mem.virt_base,
+					ipcmem->toc_data.partition_entry[p].base_offset);
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx",
+				p, ipcmem->partition[p],
+				p, ipcmem->toc_data.partition_entry[p].base_offset);
+
+	if (host0 == host1)
+		ipcmem->partition[p]->hdr = loopback_partition_hdr;
+	else
+		ipcmem->partition[p]->hdr = default_partition_hdr;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d",
+				ipcmem->partition[p]->hdr.type,
+				ipcmem->partition[p]->hdr.desc_offset,
+				ipcmem->partition[p]->hdr.desc_size);
+}
+
+static int32_t setup_partitions(struct ipclite_mem *ipcmem, uint32_t base_offset)
+{
+	uint32_t p, host0, host1;
+	uint32_t num_entry = 0;
+
+	/*Fill in each valid ipcmem partition table entry*/
+	for (host0 = 0; host0 < IPCMEM_NUM_HOSTS; host0++) {
+		if (!is_host_enabled(host0))
+			continue;
+		for (host1 = host0; host1 < IPCMEM_NUM_HOSTS; host1++) {
+			if (!is_host_enabled(host1) || is_loopback_except_apps(host0, host1))
+				continue;
+			ipcmem->toc_data.partition_entry[num_entry].base_offset = base_offset;
+			ipcmem->toc_data.partition_entry[num_entry].size = DEFAULT_PARTITION_SIZE;
+			ipcmem->toc_data.partition_entry[num_entry].flags = DEFAULT_PARTITION_FLAGS;
+			ipcmem->toc_data.partition_entry[num_entry].host0 = host0;
+			ipcmem->toc_data.partition_entry[num_entry].host1 = host1;
+
+			base_offset += DEFAULT_PARTITION_SIZE;
+			num_entry++;
+		}
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "total partitions = %u", num_entry);
+
+	ipcmem->partition = kcalloc(num_entry, sizeof(*ipcmem->partition), GFP_KERNEL);
+	if (!ipcmem->partition) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Partition Allocation failed");
+		return -ENOMEM;
+	}
+
+	/*Update appropriate partition based on partition entries*/
+	for (p = 0; p < num_entry; p++)
+		update_partition(ipcmem, p);
+
+	/*Set up info to parse partition entries*/
+	ipcmem->toc_data.partition_info->num_entries = partitions = num_entry;
+	ipcmem->toc_data.partition_info->entry_size = sizeof(struct ipcmem_partition_entry);
+
+	return 0;
+}
+
+static int32_t ipcmem_init(struct ipclite_mem *ipcmem, struct device_node *pn)
+{
+	int ret = 0;
+	uint32_t remote_pid = 0, host_count = 0, gmem_offset = 0;
+	struct device_node *cn;
+
+	for_each_available_child_of_node(pn, cn) {
+		of_property_read_u32(cn, "qcom,remote-pid", &remote_pid);
+		if (remote_pid < IPCMEM_NUM_HOSTS) {
+			enabled_hosts |= BIT_MASK(remote_pid);
+			host_count++;
+		}
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "enabled_hosts = 0x%X", enabled_hosts);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "host_count = %u", host_count);
+
+	ipcmem->toc = ipcmem->mem.virt_base;
+	IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc);
+
+	ret = setup_toc(ipcmem);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up toc");
+		return ret;
+	}
+
+	/*Set up host related info*/
+	ipcmem->toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
+	ipcmem->toc_data.host_info->configured_host = enabled_hosts;
+
+	gmem_offset += IPCMEM_TOC_SIZE;
+	setup_global_partition(ipcmem, gmem_offset);
+
+	gmem_offset += GLOBAL_PARTITION_SIZE;
+	ret = setup_partitions(ipcmem, gmem_offset);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up partitions");
+		return ret;
+	}
+
+	/*Making sure all writes for ipcmem configurations are completed*/
+	wmb();
+
+	ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED;
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n");
+
+	return 0;
+}
+
+static int ipclite_channel_irq_init(struct device *parent, struct device_node *node,
+								struct ipclite_channel *channel)
+{
+	int ret = 0;
+	u32 index = 0;
+	struct ipclite_irq_info *irq_info;
+	struct device *dev;
+	char strs[MAX_CHANNEL_SIGNALS][IPCLITE_SIGNAL_LABEL_SIZE] = {
+			"msg", "mem-init", "version", "test", "ssr", "debug"};
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->parent = parent;
+	dev->of_node = node;
+	dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent));
+	ret = device_register(dev);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite child node\n");
+		put_device(dev);
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "index", &index);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse index\n");
+		goto err_dev;
+	}
+
+	irq_info = &channel->irq_info[index];
+	IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d]=%p\n", index, irq_info);
+
+	irq_info->mbox_client.dev = dev;
+	irq_info->mbox_client.knows_txdone = true;
+	irq_info->mbox_chan = mbox_request_channel(&irq_info->mbox_client, 0);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d].mbox_chan=%p\n", index, irq_info->mbox_chan);
+	if (IS_ERR(irq_info->mbox_chan)) {
+		if (PTR_ERR(irq_info->mbox_chan) != -EPROBE_DEFER)
+			IPCLITE_OS_LOG(IPCLITE_ERR, "failed to acquire IPC channel\n");
+		goto err_dev;
+	}
+
+	snprintf(irq_info->irqname, 32, "ipclite-signal-%s", strs[index]);
+	irq_info->irq = of_irq_get(dev->of_node, 0);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "irq[%d] = %d\n", index, irq_info->irq);
+	irq_info->signal_id = index;
+	ret = devm_request_irq(dev, irq_info->irq,
+			       ipclite_intr,
+			       IRQF_NO_SUSPEND | IRQF_SHARED,
+			       irq_info->irqname, irq_info);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to request IRQ\n");
+		goto err_dev;
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt init completed, ret = %d\n", ret);
+
+	return ret;
+
+err_dev:
+	device_unregister(dev);
+	kfree(dev);
+	return ret;
+}
+
+static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid,
+								int remote_pid)
+{
+	uint32_t p = 0, found = -1;
+
+	for (p = 0; p < partitions; p++) {
+		if (ipcmem.toc_data.partition_entry[p].host0 == local_pid
+			&& ipcmem.toc_data.partition_entry[p].host1 == remote_pid) {
+			found = p;
+			break;
+		}
+	}
+
+	if (found < partitions)
+		return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base +
+					ipcmem.toc_data.partition_entry[found].base_offset);
+	else
+		return NULL;
+}
+
+static void ipclite_channel_release(struct device *dev)
+{
+	IPCLITE_OS_LOG(IPCLITE_INFO, "Releasing ipclite channel\n");
+	kfree(dev);
+}
+
+/* Sets up following fields of IPCLite channel structure:
+ *	remote_pid, tx_fifo, rx_fifo
+ */
+static int ipclite_channel_init(struct device *parent,
+								struct device_node *node)
+{
+	int ret = 0;
+	u32 local_pid = 0, remote_pid = 0;
+	u32 *descs = NULL;
+	struct ipclite_fifo *rx_fifo;
+	struct ipclite_fifo *tx_fifo;
+	struct device *dev;
+	struct device_node *child;
+	struct ipcmem_partition_header *partition_hdr;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->parent = parent;
+	dev->of_node = node;
+	dev->release = ipclite_channel_release;
+	dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent));
+	ret = device_register(dev);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite device\n");
+		put_device(dev);
+		kfree(dev);
+		return ret;
+	}
+
+	local_pid = LOCAL_HOST;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
+				   &remote_pid);
+	if (ret) {
+		dev_err(dev, "failed to parse qcom,remote-pid\n");
+		goto err_put_dev;
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "remote_pid = %d, local_pid=%d\n", remote_pid, local_pid);
+
+	rx_fifo = devm_kzalloc(dev, sizeof(*rx_fifo), GFP_KERNEL);
+	tx_fifo = devm_kzalloc(dev, sizeof(*tx_fifo), GFP_KERNEL);
+	if (!rx_fifo || !tx_fifo) {
+		ret = -ENOMEM;
+		goto err_put_dev;
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo);
+
+	partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, local_pid, remote_pid);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_hdr = %p\n", partition_hdr);
+	if (!partition_hdr) {
+		ret = -ENOMEM;
+		goto err_put_dev;
+	}
+
+	descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "descs = %p\n", descs);
+
+	if (local_pid < remote_pid) {
+		tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
+		tx_fifo->length = partition_hdr->fifo0_size;
+		rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
+		rx_fifo->length = partition_hdr->fifo1_size;
+
+		tx_fifo->tail = &descs[0];
+		tx_fifo->head = &descs[1];
+		rx_fifo->tail = &descs[2];
+		rx_fifo->head = &descs[3];
+
+	} else {
+		tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
+		tx_fifo->length = partition_hdr->fifo1_size;
+		rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
+		rx_fifo->length = partition_hdr->fifo0_size;
+
+		rx_fifo->tail = &descs[0];
+		rx_fifo->head = &descs[1];
+		tx_fifo->tail = &descs[2];
+		tx_fifo->head = &descs[3];
+	}
+
+	if (partition_hdr->type == LOOPBACK_PARTITION_TYPE) {
+		rx_fifo->tail = tx_fifo->tail;
+		rx_fifo->head = tx_fifo->head;
+	}
+
+	/* rx_fifo->reset = ipcmem_rx_reset;*/
+	rx_fifo->avail = ipcmem_rx_avail;
+	rx_fifo->peak = ipcmem_rx_peak;
+	rx_fifo->advance = ipcmem_rx_advance;
+
+	/* tx_fifo->reset = ipcmem_tx_reset;*/
+	tx_fifo->avail = ipcmem_tx_avail;
+	tx_fifo->write = ipcmem_tx_write;
+
+	*rx_fifo->tail = 0;
+	*tx_fifo->head = 0;
+
+	/*Store Channel Information*/
+	ipclite->channel[remote_pid].remote_pid = remote_pid;
+	ipclite->channel[remote_pid].tx_fifo = tx_fifo;
+	ipclite->channel[remote_pid].rx_fifo = rx_fifo;
+	ipclite->channel[remote_pid].gstatus_ptr = &partition_hdr->status;
+
+	spin_lock_init(&ipclite->channel[remote_pid].tx_lock);
+
+	for_each_available_child_of_node(dev->of_node, child) {
+		ret = ipclite_channel_irq_init(dev, child,
+				&ipclite->channel[remote_pid]);
+		if (ret) {
+			IPCLITE_OS_LOG(IPCLITE_ERR, "irq setup for ipclite channel failed\n");
+			goto err_put_dev;
+		}
+	}
+
+	/* Updating Local & Global Channel Status */
+	if (remote_pid == IPCMEM_APPS) {
+		*ipclite->channel[remote_pid].gstatus_ptr = ACTIVE;
+		ipclite->channel[remote_pid].status = ACTIVE;
+	} else {
+		*ipclite->channel[remote_pid].gstatus_ptr = IN_PROGRESS;
+		ipclite->channel[remote_pid].status = IN_PROGRESS;
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Channel init completed, ret = %d\n", ret);
+	return ret;
+
+err_put_dev:
+	ipclite->channel[remote_pid].status = INACTIVE;
+	device_unregister(dev);
+	kfree(dev);
+	return ret;
+}
+
+static void probe_subsystem(struct device *dev, struct device_node *np)
+{
+	int ret = 0;
+
+	ret = ipclite_channel_init(dev, np);
+	if (ret)
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Channel init failed\n");
+}
+
+/* IPCLite Debug related functions start */
+static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj,
+				struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0, host = 0;
+
+	/* Parse the string from Sysfs Interface */
+	ret = kstrtoint(buf, 0, &ipclite_debug_level);
+	if (ret < 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
+		return ret;
+	}
+
+	/* Check if debug structure is initialized */
+	if (!ipclite_dbg_info) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
+		return -ENOMEM;
+	}
+
+	/* Update the Global Debug variable for FW cores */
+	ipclite_dbg_info->debug_level = ipclite_debug_level;
+
+	/* Memory Barrier to make sure all writes are completed */
+	wmb();
+
+	/* Signal other cores for updating the debug information */
+	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
+		if (!is_host_enabled(host))
+			continue;
+		ret = ipclite_notify_core(host, IPCLITE_DEBUG_SIGNAL);
+		if (ret < 0)
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
+		else
+			IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
+	}
+
+	return count;
+}
+
+static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj,
+				struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0, host = 0;
+
+	/* Parse the string from Sysfs Interface */
+	ret = kstrtoint(buf, 0, &ipclite_debug_control);
+	if (ret < 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
+		return ret;
+	}
+
+	/* Check if debug structures are initialized */
+	if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
+		return -ENOMEM;
+	}
+
+	/* Update the Global Debug variable for FW cores */
+	ipclite_dbg_info->debug_control = ipclite_debug_control;
+
+	/* Memory Barrier to make sure all writes are completed */
+	wmb();
+
+	/* Signal other cores for updating the debug information */
+	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
+		if (!is_host_enabled(host))
+			continue;
+		ret = ipclite_notify_core(host, IPCLITE_DEBUG_SIGNAL);
+		if (ret < 0)
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
+		else
+			IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
+	}
+
+	return count;
+}
+
+static ssize_t ipclite_dbg_dump_write(struct kobject *kobj,
+				struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+
+	/* Parse the string from Sysfs Interface */
+	ret = kstrtoint(buf, 0, &ipclite_debug_dump);
+	if (ret < 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
+		return ret;
+	}
+
+	/* Check if debug structures are initialized */
+	if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
+		return -ENOMEM;
+	}
+
+	/* Dump the debug information */
+	if (ipclite_debug_dump & IPCLITE_DUMP_DBG_STRUCT)
+		ipclite_dump_debug_struct();
+
+	if (ipclite_debug_dump & IPCLITE_DUMP_INMEM_LOG)
+		ipclite_dump_inmem_logs();
+
+	return count;
+}
+
+struct kobj_attribute sysfs_dbg_lvl = __ATTR(ipclite_debug_level, 0660,
+					NULL, ipclite_dbg_lvl_write);
+struct kobj_attribute sysfs_dbg_ctrl = __ATTR(ipclite_debug_control, 0660,
+					NULL, ipclite_dbg_ctrl_write);
+struct kobj_attribute sysfs_dbg_dump = __ATTR(ipclite_debug_dump, 0660,
+					NULL, ipclite_dbg_dump_write);
+
+static int ipclite_debug_sysfs_setup(void)
+{
+	int ret = 0;
+
+	/* Creating a directory in /sys/kernel/ */
+	sysfs_kobj = kobject_create_and_add("ipclite", kernel_kobj);
+	if (!sysfs_kobj) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create and add sysfs directory\n");
+		return -ENOMEM;
+	}
+
+	/* Creating sysfs files/interfaces for debug */
+	ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug level file\n");
+		return ret;
+	}
+
+	ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug control file\n");
+		return ret;
+	}
+
+	ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug dump file\n");
+		return ret;
+	}
+
+	return ret;
+}
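
With the kobject anchored at kernel_kobj, the three attributes above surface as /sys/kernel/ipclite/ipclite_debug_level, .../ipclite_debug_control and .../ipclite_debug_dump (mode 0660, so root access is assumed). A minimal user-space sketch of driving them, not part of the driver; the bitmask values follow the enums declared in ipclite.h:

```c
/*
 * Illustrative user-space sketch (not part of the driver): pokes the
 * IPCLite debug sysfs nodes created by ipclite_debug_sysfs_setup().
 * Paths follow from kobject_create_and_add("ipclite", kernel_kobj)
 * and the __ATTR names; run as root since the nodes are mode 0660.
 */
#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* IPCLITE_ERR | IPCLITE_DBG = 0x1 | 0x8 (enum ipclite_debug_level) */
	write_sysfs("/sys/kernel/ipclite/ipclite_debug_level", "9");
	/* IPCLITE_DMESG_LOG = 0x1 (enum ipclite_debug_control) */
	write_sysfs("/sys/kernel/ipclite/ipclite_debug_control", "1");
	/* IPCLITE_DUMP_DBG_STRUCT = 0x1 (enum ipclite_debug_dump) */
	write_sysfs("/sys/kernel/ipclite/ipclite_debug_dump", "1");
	return 0;
}
```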
+
+static int ipclite_debug_mem_setup(void)
+{
+	/* Setting up the Debug Structures */
+	ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base +
+						ipclite->ipcmem.mem.size) - DEBUG_PARTITION_SIZE);
+	if (!ipclite_dbg_info)
+		return -EADDRNOTAVAIL;
+
+	ipclite_dbg_struct = (struct ipclite_debug_struct *)
+					(((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) +
+					(sizeof(*ipclite_dbg_struct) * IPCMEM_APPS));
+	if (!ipclite_dbg_struct)
+		return -EADDRNOTAVAIL;
+
+	ipclite_dbg_inmem = (struct ipclite_debug_inmem_buf *)
+					(((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) +
+					(sizeof(*ipclite_dbg_struct) * IPCMEM_NUM_HOSTS));
+
+	if (!ipclite_dbg_inmem)
+		return -EADDRNOTAVAIL;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "virtual_base_ptr = %p total_size : %d debug_size : %d\n",
+		ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, DEBUG_PARTITION_SIZE);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "dbg_info : %p dbg_struct : %p dbg_inmem : %p\n",
+					ipclite_dbg_info, ipclite_dbg_struct, ipclite_dbg_inmem);
+
+	return 0;
+}
+
+static int ipclite_debug_setup(void)
+{
+	int ret = 0;
+
+	/* Set up sysfs for debug */
+	ret = ipclite_debug_sysfs_setup();
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n");
+		return ret;
+	}
+
+	/* Mapping Debug Memory */
+	ret = ipclite_debug_mem_setup();
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n");
+		return ret;
+	}
+
+	/* Update the Global Debug variable for FW cores */
+	ipclite_dbg_info->debug_level = ipclite_debug_level;
+	ipclite_dbg_info->debug_control = ipclite_debug_control;
+
+	return ret;
+}
+/* IPCLite Debug related functions end */
+
+/* IPCLite Features setup related functions start */
+static int ipclite_feature_setup(struct device_node *pn)
+{
+	int ret = 0;
+	uint32_t feature_mask_l = 0, feature_mask_h = 0;
+
+	/* Parse the feature related DT entries and store the values locally */
+	ret = of_property_read_u32(pn, "feature_mask_low", &feature_mask_l);
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse feature_mask_low\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(pn, "feature_mask_high", &feature_mask_h);
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse feature_mask_high\n");
+		return ret;
+	}
+
+	/* Combine feature_mask_low and feature_mask_high into 64-bit feature_mask */
+	feature_mask = (uint64_t) feature_mask_h << 32 | feature_mask_l;
+
+	/* Update the feature mask to TOC for FW */
+	ipclite->ipcmem.toc->hdr.feature_mask = feature_mask;
+
+	/* Set up Global Atomics Feature */
+	if (!(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC)))
+		IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Global Atomic Support Disabled\n");
+
+	/* Set up Test Suite Feature */
+	if (!(IS_FEATURE_CONFIG(IPCLITE_TEST_SUITE)))
+		IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Test Suite Disabled\n");
+
+	return ret;
+}
+/* IPCLite Features setup related functions end */
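
The mask assembly above is a plain 32/32 split: feature_mask_high lands in the upper word and feature_mask_low in the lower. A stand-alone worked example with illustrative DT values:

```c
/* Illustrative only: how the two DT cells combine into the 64-bit mask.
 * With feature_mask_high = 0x0 and feature_mask_low = 0x3, both
 * IPCLITE_GLOBAL_ATOMIC (0x1) and IPCLITE_TEST_SUITE (0x2) are enabled.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t feature_mask_l = 0x3, feature_mask_h = 0x0;
	uint64_t feature_mask = (uint64_t)feature_mask_h << 32 | feature_mask_l;

	assert(feature_mask & 0x1);	/* global atomics configured */
	assert(feature_mask & 0x2);	/* test suite configured */
	return 0;
}
```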
+
+/* API Definition Start - Minor Version 0 */
+static int ipclite_init_v0(struct platform_device *pdev)
+{
+	int ret = 0, hwlock_id = 0;
+	struct ipcmem_region *mem;
+	struct device_node *cn;
+	struct device_node *pn = pdev->dev.of_node;
+	struct ipclite_channel broadcast;
+
+	/* Allocate memory for IPCLite */
+	ipclite = kzalloc(sizeof(*ipclite), GFP_KERNEL);
+	if (!ipclite) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Memory Allocation Failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ipclite->dev = &pdev->dev;
+
+	/* Parse HW Lock from DT */
+	hwlock_id = of_hwspin_lock_get_id(pn, 0);
+	if (hwlock_id < 0) {
+		if (hwlock_id != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+		ret = hwlock_id;
+		goto release;
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id retrieved, hwlock_id=%d\n", hwlock_id);
+
+	/* Reserve a HWSpinLock for later use */
+	ipclite->hwlock = hwspin_lock_request_specific(hwlock_id);
+	if (!ipclite->hwlock) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to assign hwlock_id\n");
+		ret = -ENXIO;
+		goto release;
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id assigned successfully, hwlock=%p\n",
+									ipclite->hwlock);
+
+	/* Initializing Local Mutex Lock for SSR functionality */
+	mutex_init(&ssr_mutex);
+
+	/* Map to IPCLite Memory */
+	ret = map_ipcmem(ipclite, "memory-region");
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to map ipcmem\n");
+		goto release;
+	}
+	mem = &(ipclite->ipcmem.mem);
+	memset(mem->virt_base, 0, mem->size);
+
+	ret = ipcmem_init(&ipclite->ipcmem, pn);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up IPCMEM");
+		goto release;
+	}
+
+	/* Setup Channel for each Remote Subsystem */
+	for_each_available_child_of_node(pn, cn)
+		probe_subsystem(&pdev->dev, cn);
+
+	/* Broadcast init_done signal to all subsystems once mbox channels are set up */
+	broadcast = ipclite->channel[IPCMEM_APPS];
+	ret = mbox_send_message(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, NULL);
+	if (ret < 0)
+		goto mem_release;
+
+	mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0);
+
+	/* Debug Setup */
+	ret = ipclite_debug_setup();
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Debug Setup Failed\n");
+		goto release;
+	}
+
+	/* Features Setup */
+	ret = ipclite_feature_setup(pn);
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Features Setup Failed\n");
+		goto release;
+	}
+
+	/* Update TOC with version entries for FW */
+	ipclite->ipcmem.toc->hdr.major_version = major_ver;
+	ipclite->ipcmem.toc->hdr.minor_version = minor_ver;
+
+	/* Should be called after all Global TOC related init is done */
+	insert_magic_number();
+
+	IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Version : %d.%d Feature Mask : 0x%llx\n",
+						major_ver, minor_ver, feature_mask);
+
+	IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite Probe Completed Successfully\n");
+
+	return ret;
+
+mem_release:
+	/* If the remote subsystem has already completed its init and is actively
+	 * using IPCMEM, re-assigning IPCMEM memory back to HLOS can lead to a crash.
+	 * Solution: Either we don't take back the memory, or we make sure APPS
+	 * completes init before any other subsystem initializes IPCLite (so we
+	 * won't have to send the broadcast).
+	 */
+release:
+	kfree(ipclite);
+	ipclite = NULL;
+error:
+	return ret;
+}
+
+static int ipclite_register_client_v0(IPCLite_Client cb_func_ptr, void *priv)
+{
+	if (!cb_func_ptr) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n");
+		return -EINVAL;
+	}
+
+	synx_client.callback = cb_func_ptr;
+	synx_client.priv_data = priv;
+	synx_client.reg_complete = 1;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Client Registration completed\n");
+
+	return 0;
+}
+
+static int ipclite_register_test_client_v0(IPCLite_Client cb_func_ptr, void *priv)
+{
+	if (!cb_func_ptr) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n");
+		return -EINVAL;
+	}
+
+	test_client.callback = cb_func_ptr;
+	test_client.priv_data = priv;
+	test_client.reg_complete = 1;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Test Client Registration Completed\n");
+
+	return 0;
+}
+
+static int ipclite_msg_send_v0(int32_t proc_id, uint64_t data)
+{
+	int ret = 0;
+
+	/* Check for valid core id */
+	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
+		return -EINVAL;
+	}
+
+	/* Send the data to the core */
+	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_MSG_SIGNAL);
+	if (ret < 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Message send failed to core : %d signal:%d ret:%d\n",
+								proc_id, IPCLITE_MSG_SIGNAL, ret);
+		return ret;
+	}
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Message send complete to core : %d signal : %d ret : %d\n",
+								proc_id, IPCLITE_MSG_SIGNAL, ret);
+	return ret;
+}
+
+static int ipclite_test_msg_send_v0(int32_t proc_id, uint64_t data)
+{
+	int ret = 0;
+
+	/* Check for valid core id */
+	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
+		return -EINVAL;
+	}
+
+	/* Send the data to the core */
+	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data), IPCLITE_TEST_SIGNAL);
+	if (ret < 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Message send failed to core : %d signal:%d ret:%d\n",
+								proc_id, IPCLITE_TEST_SIGNAL, ret);
+		return ret;
+	}
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Test Msg send complete to core : %d signal : %d ret : %d\n",
+								proc_id, IPCLITE_TEST_SIGNAL, ret);
+	return ret;
+}
+
+static int32_t get_global_partition_info_v0(struct global_region_info *global_ipcmem)
+{
+	struct ipcmem_global_partition *global_partition;
+
+	if (!ipclite) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n");
+		return -ENOMEM;
+	}
+
+	if (!global_ipcmem)
+		return -EINVAL;
+
+	global_partition = ipclite->ipcmem.global_partition;
+	global_ipcmem->virt_base = (void *)((char *)global_partition +
+							global_partition->hdr.region_offset);
+	global_ipcmem->size = (size_t)(global_partition->hdr.region_size);
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "base = %p, size=%lx\n", global_ipcmem->virt_base,
+									global_ipcmem->size);
+	return 0;
+}
+
+static void ipclite_recover_v0(enum ipcmem_host_type core_id)
+{
+	int ret = 0, host = 0, host0 = 0, host1 = 0;
+	uint32_t p = 0;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id);
+
+	/* verify and reset the hw mutex lock */
+	if (core_id == ipclite->ipcmem.toc_data.host_info->hwlock_owner) {
+		ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
+		hwspin_unlock_raw(ipclite->hwlock);
+		IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n");
+	}
+
+	mutex_lock(&ssr_mutex);
+
+	/* Set the Global Channel Status to 0 to avoid Race condition */
+	for (p = 0; p < partitions; p++) {
+		host0 = ipclite->ipcmem.toc_data.partition_entry[p].host0;
+		host1 = ipclite->ipcmem.toc_data.partition_entry[p].host1;
+		if (host0 != core_id && host1 != core_id)
+			continue;
+
+		ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
+			(&(ipclite->ipcmem.partition[p]->hdr.status)), 0);
+
+		IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n",
+					host0, host1, ipclite->ipcmem.partition[p]->hdr.status);
+	}
+
+	/* Resets the TX/RX queue */
+	*(ipclite->channel[core_id].tx_fifo->head) = 0;
+	*(ipclite->channel[core_id].rx_fifo->tail) = 0;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "TX Fifo Reset : %d\n",
+						*(ipclite->channel[core_id].tx_fifo->head));
+	IPCLITE_OS_LOG(IPCLITE_DBG, "RX Fifo Reset : %d\n",
+						*(ipclite->channel[core_id].rx_fifo->tail));
+
+	/* Increment the Global Channel Status for APPS and crashed core*/
+	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
+					ipclite->channel[core_id].gstatus_ptr);
+
+	ipclite->channel[core_id].status = *ipclite->channel[core_id].gstatus_ptr;
+
+	/* Update other cores about SSR */
+	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
+		if (!is_host_enabled(host) || host == core_id)
+			continue;
+		ret = ipclite_notify_core(host, IPCLITE_SSR_SIGNAL);
+		if (ret < 0)
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send SSR update to core %d\n", host);
+		else
+			IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host);
+	}
+
+	mutex_unlock(&ssr_mutex);
+
+	/* Dump the debug information */
+	if (ipclite_debug_dump & IPCLITE_DUMP_SSR) {
+		ipclite_dump_debug_struct();
+		ipclite_dump_inmem_logs();
+	}
+}
+/* API Definition End - Minor Version 0 */
+
+/* Versioned Functions Start */
+int ipclite_init(struct platform_device *pdev)
+{
+	if (api_list_t.init == NULL) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__);
+		return -EINVAL;
+	}
+
+	return api_list_t.init(pdev);
+}
+
+int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv)
+{
+	if (api_list_t.register_client == NULL) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__);
+		return -EINVAL;
+	}
+
+	return api_list_t.register_client(cb_func_ptr, priv);
+}
+EXPORT_SYMBOL(ipclite_register_client);
+
+int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv)
+{
+	if (api_list_t.register_test_client == NULL) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__);
+		return -EINVAL;
+	}
+
+	return api_list_t.register_test_client(cb_func_ptr, priv);
+}
+EXPORT_SYMBOL(ipclite_register_test_client);
+
+int ipclite_msg_send(int32_t proc_id, uint64_t data)
+{
+	if (api_list_t.msg_send == NULL) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__);
+		return -EINVAL;
+	}
+
+	return api_list_t.msg_send(proc_id, data);
+}
+EXPORT_SYMBOL(ipclite_msg_send);
+
+int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
+{
+	if (api_list_t.test_msg_send == NULL) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__);
+		return -EINVAL;
+	}
+
+	return api_list_t.test_msg_send(proc_id, data);
+}
+EXPORT_SYMBOL(ipclite_test_msg_send);
+
+void ipclite_recover(enum ipcmem_host_type core_id)
+{
+	if (api_list_t.recover == NULL) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__);
+		return;
+	}
+
+	api_list_t.recover(core_id);
+}
+EXPORT_SYMBOL(ipclite_recover);
+
+int32_t get_global_partition_info(struct global_region_info *global_ipcmem)
+{
+	if (api_list_t.partition_info == NULL) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Unassigned function : %s", __func__);
+		return -EINVAL;
+	}
+
+	return api_list_t.partition_info(global_ipcmem);
+}
+EXPORT_SYMBOL(get_global_partition_info);
+/* Versioned Functions End */
+
+/* List of APIs  based on the version */
+struct ipclite_api_list api_list_version[] = {
+	/* Minor Version 0 */
+	{
+		.init = ipclite_init_v0,
+		.register_client = ipclite_register_client_v0,
+		.register_test_client = ipclite_register_test_client_v0,
+		.msg_send = ipclite_msg_send_v0,
+		.test_msg_send = ipclite_test_msg_send_v0,
+		.partition_info = get_global_partition_info_v0,
+		.recover = ipclite_recover_v0,
+	},
+};
+
+/* IPCLite Version setup related functions start */
+static int ipclite_update_version_api(struct ipclite_api_list *res_str,
+						struct ipclite_api_list *ver_str)
+{
+	if (res_str == NULL || ver_str == NULL)
+		return -EINVAL;
+
+	/* Register APIs based on the version */
+	res_str->init = (ver_str->init != NULL) ?
+		ver_str->init : res_str->init;
+
+	res_str->register_client = (ver_str->register_client != NULL) ?
+		ver_str->register_client : res_str->register_client;
+	res_str->register_test_client = (ver_str->register_test_client != NULL) ?
+		ver_str->register_test_client : res_str->register_test_client;
+
+	res_str->msg_send = (ver_str->msg_send != NULL) ?
+		ver_str->msg_send : res_str->msg_send;
+	res_str->test_msg_send = (ver_str->test_msg_send != NULL) ?
+		ver_str->test_msg_send : res_str->test_msg_send;
+
+	res_str->partition_info = (ver_str->partition_info != NULL) ?
+		ver_str->partition_info : res_str->partition_info;
+	res_str->recover = (ver_str->recover != NULL) ?
+		ver_str->recover : res_str->recover;
+
+	return 0;
+}
+
+static int ipclite_register_api(void)
+{
+	int ret = 0, ver_itr = 0;
+
+	/* Register APIs based on the version */
+	for (ver_itr = 0; ver_itr <= minor_ver; ver_itr++) {
+		ret = ipclite_update_version_api(&api_list_t, &api_list_version[ver_itr]);
+		if (ret != 0)
+			return ret;
+	}
+
+	return ret;
+}
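
Note how ipclite_register_api() folds every table from minor version 0 up to the probed minor_ver onto api_list_t, and ipclite_update_version_api() only overwrites the entries a newer table actually provides. A hypothetical minor version 1 entry (ipclite_msg_send_v1 is an invented name, not a symbol in this driver) would therefore only need to list what it overrides:

```c
/* Hypothetical sketch: a minor version 1 that overrides only msg_send.
 * After ipclite_register_api() runs with minor_ver == 1, api_list_t keeps
 * the v0 pointers for every other entry point.
 */
struct ipclite_api_list api_list_version[] = {
	/* Minor Version 0 */
	{
		.init = ipclite_init_v0,
		/* ... remaining v0 entries as above ... */
	},
	/* Minor Version 1 */
	{
		.msg_send = ipclite_msg_send_v1,	/* all other members NULL */
	},
};
```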
+
+static int ipclite_version_setup(struct device_node *pn)
+{
+	int ret = 0;
+
+	/* Parse the version related DT entries and store the values locally */
+	ret = of_property_read_u32(pn, "major_version", &major_ver);
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse major_vesion\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(pn, "minor_version", &minor_ver);
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse minor_vesion\n");
+		return ret;
+	}
+
+	/* Verify IPCLite Version - if version does not match crash the system */
+	BUG_ON(major_ver != MAJOR_VERSION || minor_ver > MINOR_VERSION);
+
+	return ret;
+}
+/* IPCLite Version setup related functions end */
+
+/* Start of IPCLite Init */
+static int ipclite_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	/* Version Setup */
+	ret = ipclite_version_setup(pdev->dev.of_node);
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Version Setup Failed\n");
+		goto error;
+	}
+
+	/* Register API Setup */
+	ret = ipclite_register_api();
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite API Registration Failed\n");
+		goto error;
+	}
+
+	/* IPCLite Init */
+	ret = ipclite_init(pdev);
+	if (ret != 0) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Init Failed\n");
+		goto error;
+	}
+
+	return ret;
+
+error:
+	IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite probe failed\n");
+	return ret;
+}
+
+static const struct of_device_id ipclite_of_match[] = {
+	{ .compatible = "qcom,ipclite"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, ipclite_of_match);
+
+static struct platform_driver ipclite_driver = {
+	.probe = ipclite_probe,
+	.driver = {
+		.name = "ipclite",
+		.of_match_table = ipclite_of_match,
+	},
+};
+
+module_platform_driver(ipclite_driver);
+
+MODULE_DESCRIPTION("IPCLite Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: qcom_hwspinlock");

+ 443 - 0
qcom/opensource/synx-kernel/msm/synx/ipclite.h

@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/hwspinlock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/soc/qcom,ipcc.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include "ipclite_client.h"
+
+/* version related entries */
+#define MAJOR_VERSION		1
+#define MINOR_VERSION		0
+
+#define IPCMEM_INIT_COMPLETED	0x1
+#define ACTIVE_CHANNEL			0x1
+
+#define IPCMEM_TOC_SIZE			(4*1024)
+#define IPCMEM_TOC_VAR_OFFSET	0x100
+
+#define GLOBAL_ATOMIC_SUPPORT_BMSK 0x1UL
+
+/* IPCC signal info */
+#define IPCLITE_MSG_SIGNAL		0
+#define IPCLITE_MEM_INIT_SIGNAL 1
+#define IPCLITE_VERSION_SIGNAL  2
+#define IPCLITE_TEST_SIGNAL		3
+#define IPCLITE_SSR_SIGNAL		4
+#define IPCLITE_DEBUG_SIGNAL	5
+#define MAX_CHANNEL_SIGNALS		6
+
+/** Flag definitions for the entries */
+#define IPCMEM_FLAGS_ENABLE_READ_PROTECTION   (0x01)
+#define IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION  (0x02)
+#define IPCMEM_FLAGS_ENABLE_RW_PROTECTION \
+		(IPCMEM_FLAGS_ENABLE_READ_PROTECTION | \
+		IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION)
+
+#define IPCMEM_FLAGS_IGNORE_PARTITION         (0x00000004)
+
+/*Hardcoded macro to identify local host on each core*/
+#define LOCAL_HOST		IPCMEM_APPS
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT	1000
+
+/* queue related entries */
+#define FIFO_FULL_RESERVE		8
+#define FIFO_ALIGNMENT			8
+
+/* debug related entries */
+#define IPCLITE_DEBUG_INFO_SIZE		256
+#define IPCLITE_CORE_DBG_LABEL		"APSS:"
+#define IPCLITE_LOG_MSG_SIZE		100
+#define IPCLITE_LOG_BUF_SIZE		512
+#define IPCLITE_DBG_LABEL_SIZE		5
+#define IPCLITE_SIGNAL_LABEL_SIZE	10
+#define PREV_INDEX					2
+
+#define ADD_OFFSET(x, y)	((void *)((size_t)x + y))
+
+/* IPCLite Logging Mechanism */
+#define IPCLITE_OS_LOG(__level, __fmt, arg...) \
+	do { \
+		if (ipclite_debug_level & __level) { \
+			if (ipclite_debug_control & IPCLITE_DMESG_LOG) \
+				pr_info(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \
+							ipclite_dbg_label[__level], ## arg); \
+			if (ipclite_debug_control & IPCLITE_INMEM_LOG) \
+				ipclite_inmem_log(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \
+							ipclite_dbg_label[__level], ## arg); \
+		} \
+	} while (0)
+
+/* IPCLite Debug enable status */
+#define IS_DEBUG_CONFIG(ipclite_debug) (ipclite_debug_control & ipclite_debug)
+
+/* IPCLite Feature enable status */
+#define IS_FEATURE_CONFIG(ipclite_feature) (feature_mask & ipclite_feature)
+
+/* Global Atomic status */
+#define ATOMIC_HW_MUTEX_ACQUIRE \
+(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC) ?: ipclite_hw_mutex_acquire())
+#define ATOMIC_HW_MUTEX_RELEASE \
+(IS_FEATURE_CONFIG(IPCLITE_GLOBAL_ATOMIC) ?: ipclite_hw_mutex_release())
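
The two macros above rely on the GNU `?:` (Elvis) extension: `a ?: b` evaluates to `a` when `a` is non-zero and to `b` otherwise. So when the IPCLITE_GLOBAL_ATOMIC bit is set, the expression short-circuits to the non-zero mask and the hardware mutex is never touched; only when global atomics are unavailable does it fall through to the mutex call. A minimal stand-alone illustration (GNU C, as used by the kernel):

```c
/* Minimal sketch of the ?: fallback pattern used by ATOMIC_HW_MUTEX_*:
 * a non-zero feature bit short-circuits the expression, so take_mutex()
 * (a stand-in for ipclite_hw_mutex_acquire()) is never called.
 */
static int take_mutex(void)
{
	return 0;	/* pretend the hw mutex was acquired */
}

int acquire(unsigned long feature_mask)
{
	return (feature_mask & 0x1UL) ?: take_mutex();
}
```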
+
+/* API Structure */
+struct ipclite_api_list {
+	int (*init)(struct platform_device *pdev);
+	int32_t (*register_client)(IPCLite_Client cb_func_ptr, void *priv);
+	int32_t (*register_test_client)(IPCLite_Client cb_func_ptr, void *priv);
+	int32_t (*msg_send)(int32_t proc_id, uint64_t data);
+	int32_t (*test_msg_send)(int32_t proc_id, uint64_t data);
+	int32_t (*partition_info)(struct global_region_info *global_ipcmem);
+	void (*recover)(enum ipcmem_host_type core_id);
+} api_list_t;
+
+/**
+ * enum ipclite_channel_status - channel status
+ *
+ * INACTIVE             : Channel uninitialized or init failed
+ * IN_PROGRESS          : Channel init passed, awaiting confirmation from remote host
+ * ACTIVE               : Channel init passed in local and remote host, thus active
+ */
+enum ipclite_channel_status {
+	INACTIVE				= 0,
+	IN_PROGRESS				= 1,
+	ACTIVE					= 2,
+};
+
+enum ipclite_feature_mask {
+	IPCLITE_GLOBAL_ATOMIC = 0x0001ULL,
+	IPCLITE_TEST_SUITE = 0x0002ULL,
+};
+
+enum ipclite_debug_level {
+	IPCLITE_ERR  = 0x0001,
+	IPCLITE_WARN = 0x0002,
+	IPCLITE_INFO = 0x0004,
+	IPCLITE_DBG  = 0x0008,
+};
+
+enum ipclite_debug_control {
+	IPCLITE_DMESG_LOG = 0x0001,
+	IPCLITE_DBG_STRUCT = 0x0002,
+	IPCLITE_INMEM_LOG = 0x0004,
+};
+
+enum ipclite_debug_dump {
+	IPCLITE_DUMP_DBG_STRUCT = 0x0001,
+	IPCLITE_DUMP_INMEM_LOG = 0x0002,
+	IPCLITE_DUMP_SSR = 0x0004,
+};
+
+static const char ipclite_dbg_label[][IPCLITE_DBG_LABEL_SIZE] = {
+	[IPCLITE_ERR] = "err",
+	[IPCLITE_WARN] = "warn",
+	[IPCLITE_INFO] = "info",
+	[IPCLITE_DBG] = "dbg"
+};
+
+/**
+ * IPCMEM Debug Structure Definitions
+ *  - Present in Local Memory
+ */
+
+struct ipclite_debug_info_host {
+	uint32_t numsig_sent; //no. of signals sent from the core
+	uint32_t numsig_recv; //no. of signals received on the core
+	uint32_t tx_wr_index; //write index of tx queue
+	uint32_t tx_rd_index; //read index of tx queue
+	uint32_t rx_wr_index; //write index of rx queue
+	uint32_t rx_rd_index; //read index of rx queue
+	uint32_t num_intr; //no. of interrupts received on the core
+	uint32_t prev_tx_wr_index[PREV_INDEX]; //previous write index of tx queue
+	uint32_t prev_tx_rd_index[PREV_INDEX]; //previous read index of tx queue
+	uint32_t prev_rx_wr_index[PREV_INDEX]; //previous write index of rx queue
+	uint32_t prev_rx_rd_index[PREV_INDEX]; //previous read index of rx queue
+};
+
+struct ipclite_debug_info_overall {
+	uint32_t total_numsig_sent; //total no. of signals sent
+	uint32_t total_numsig_recv; //total no. of signals received
+	uint32_t last_sent_host_id; //last signal sent to host
+	uint32_t last_recv_host_id; //last signal received from host
+	uint32_t last_sigid_sent; //last sent signal id
+	uint32_t last_sigid_recv; //last received signal id
+};
+
+struct ipclite_debug_info {
+	uint32_t debug_version;
+	uint32_t debug_level;
+	uint32_t debug_control;
+	uint32_t debug_dump;
+	uint32_t debug_log_index;
+};
+
+struct ipclite_debug_inmem_buf {
+	char IPCLITELog[IPCLITE_LOG_BUF_SIZE][IPCLITE_LOG_MSG_SIZE];
+};
+
+struct ipclite_debug_struct {
+	struct ipclite_debug_info_overall dbg_info_overall;
+	struct ipclite_debug_info_host dbg_info_host[IPCMEM_NUM_HOSTS];
+};
+
+/**
+ * IPCMEM TOC Structure Definitions
+ *  - Present in toc in shared memory
+ */
+
+struct ipcmem_host_info {
+	uint32_t hwlock_owner;
+	uint32_t configured_host;
+};
+
+struct ipcmem_partition_entry {
+	uint32_t base_offset;	/*partition offset from IPCMEM base*/
+	uint32_t size;			/*partition size*/
+	uint32_t flags;			/*partition flags if required*/
+	uint32_t host0;			/*subsystem 0 who can access this partition*/
+	uint32_t host1;			/*subsystem 1 who can access this partition*/
+	uint32_t reserved;		/*legacy partition active status*/
+};
+
+struct ipcmem_partition_info {
+	uint32_t num_entries;	/* Number of channel partitions */
+	uint32_t entry_size;	/* Size of partition_entry structure */
+};
+
+struct ipcmem_offsets {
+	uint32_t host_info;
+	uint32_t global_entry;
+	uint32_t partition_info;
+	uint32_t partition_entry;
+	uint32_t debug;
+	uint32_t reserved;		/*Padded for 64-bit alignment*/
+};
+
+/**
+ * Any change in TOC header size can only be accommodated with
+ * major version change, as it is not backward compatible.
+ */
+struct ipcmem_toc_header {
+	uint32_t magic_number;		/*Checksum of TOC*/
+	uint32_t init_done;			/*TOC initialization status*/
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint64_t feature_mask;
+	uint32_t reserved[6];		/*Padded for future use and 64-bit alignment*/
+};
+
+/**
+ * struct ipcmem_toc - Table of contents in ipcmem
+ *
+ * @hdr     : Header to check for toc integrity, version and features
+ * @offsets : List of offsetted structures and partition entries
+ *            available in the toc data region (ipcmem_toc_data)
+ */
+struct ipcmem_toc {
+	struct ipcmem_toc_header hdr;
+	struct ipcmem_offsets offsets;
+
+	/* ---------------------------------------
+	 * ipcmem_toc_data @ 256-byte offset
+	 * struct ipcmem_host_info host_info;
+	 * struct ipcmem_partition_entry global_entry;
+	 * struct ipcmem_partition_info partition_info;
+	 * struct ipcmem_partition_entry partition_entry[num_entries];
+	 * ---------------------------------------
+	 */
+};
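
The comment block above fixes the toc data region at a 256-byte offset (IPCMEM_TOC_VAR_OFFSET), and struct ipcmem_toc_data further below caches typed pointers into it. A hedged sketch of how those pointers could be resolved with the ADD_OFFSET() helper, assuming the offsets are relative to the TOC base (the actual setup code lives elsewhere in ipclite.c):

```c
/* Illustrative sketch only: resolving the ipcmem_toc_data pointers from
 * the TOC offset table. Assumes offsets are relative to the TOC base;
 * the real initialization is done elsewhere in ipclite.c.
 */
static void toc_data_resolve(struct ipcmem_toc *toc,
				struct ipcmem_toc_data *data)
{
	data->host_info       = ADD_OFFSET(toc, toc->offsets.host_info);
	data->global_entry    = ADD_OFFSET(toc, toc->offsets.global_entry);
	data->partition_info  = ADD_OFFSET(toc, toc->offsets.partition_info);
	data->partition_entry = ADD_OFFSET(toc, toc->offsets.partition_entry);
}
```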
+
+/**
+ * IPCMEM Partition Structure Definitions
+ *  - Present in partitions in shared memory
+ */
+
+struct global_partition_header {
+	uint32_t partition_type;
+	uint32_t region_offset;
+	uint32_t region_size;
+};
+
+struct ipcmem_global_partition {
+	struct global_partition_header hdr;
+};
+
+struct ipcmem_partition_header {
+	uint32_t type;			   /*partition type*/
+	uint32_t desc_offset;      /*descriptor offset*/
+	uint32_t desc_size;        /*descriptor size*/
+	uint32_t fifo0_offset;     /*fifo 0 offset*/
+	uint32_t fifo0_size;       /*fifo 0 size*/
+	uint32_t fifo1_offset;     /*fifo 1 offset*/
+	uint32_t fifo1_size;       /*fifo 1 size*/
+	uint32_t status;           /*partition status*/
+};
+
+struct ipcmem_partition {
+	struct ipcmem_partition_header hdr;
+};
+
+/**
+ * IPCMEM Helper Structure Definitions
+ *  - Present in local memory
+ *  - Can have pointers to toc and partitions in shared memory
+ */
+
+/*Pointers to offsetted structures in TOC*/
+struct ipcmem_toc_data {
+	struct ipcmem_host_info *host_info;
+	struct ipcmem_partition_entry *global_entry;
+	struct ipcmem_partition_info *partition_info;
+	struct ipcmem_partition_entry *partition_entry;
+};
+
+struct ipcmem_region {
+	u64 aux_base;
+	void __iomem *virt_base;
+	uint32_t size;
+};
+
+struct ipclite_mem {
+	struct ipcmem_toc *toc;
+	struct ipcmem_toc_data toc_data;
+	struct ipcmem_region mem;
+	struct ipcmem_global_partition *global_partition;
+	struct ipcmem_partition **partition;
+};
+
+/**
+ * IPCLite Structure Definitions
+ *  - Present in local memory
+ *  - Can have pointers to partitions in shared memory
+ */
+
+struct ipclite_fifo {
+	uint32_t length;
+
+	__le32 *tail;
+	__le32 *head;
+
+	void *fifo;
+
+	size_t (*avail)(struct ipclite_fifo *fifo);
+
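+	/* "peak" (presumably "peek") reads queued data without advancing the FIFO */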
+	void (*peak)(struct ipclite_fifo *fifo,
+					void *data, size_t count);
+
+	void (*advance)(struct ipclite_fifo *fifo,
+					size_t count, uint32_t core_id);
+
+	void (*write)(struct ipclite_fifo *fifo,
+		const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id);
+
+	void (*reset)(struct ipclite_fifo *fifo);
+};
+
+struct ipclite_irq_info {
+	struct mbox_client mbox_client;
+	struct mbox_chan *mbox_chan;
+	int irq;
+	int signal_id;
+	char irqname[32];
+};
+
+struct ipclite_client {
+	IPCLite_Client callback;
+	void *priv_data;
+	int reg_complete;
+};
+
+struct ipclite_channel {
+	uint32_t remote_pid;
+
+	struct ipclite_fifo *tx_fifo;
+	struct ipclite_fifo *rx_fifo;
+	spinlock_t tx_lock;
+
+	struct ipclite_irq_info irq_info[MAX_CHANNEL_SIGNALS];
+
+	struct ipclite_client client;
+
+	uint32_t channel_version;
+	uint32_t version_finalised;
+
+	uint32_t *gstatus_ptr;
+	uint32_t status;
+};
+
+/*Single structure that defines everything about IPCLite*/
+struct ipclite_info {
+	struct device *dev;
+	struct ipclite_channel channel[IPCMEM_NUM_HOSTS];
+	struct ipclite_mem ipcmem;
+	struct hwspinlock *hwlock;
+	unsigned long hw_mutex_flags;
+};
+
+/*Default partition parameters*/
+#define DEFAULT_PARTITION_TYPE			0x0
+#define DEFAULT_PARTITION_STATUS		INACTIVE
+#define DEFAULT_PARTITION_HDR_SIZE		1024
+
+#define DEFAULT_DESCRIPTOR_OFFSET		1024
+#define DEFAULT_DESCRIPTOR_SIZE			(3*1024)
+#define DEFAULT_FIFO0_OFFSET			(4*1024)
+#define DEFAULT_FIFO0_SIZE				(8*1024)
+#define DEFAULT_FIFO1_OFFSET			(12*1024)
+#define DEFAULT_FIFO1_SIZE				(8*1024)
+
+#define DEFAULT_PARTITION_SIZE			(32*1024)
+#define DEFAULT_PARTITION_FLAGS			IPCMEM_FLAGS_ENABLE_RW_PROTECTION
+
+/*Loopback partition parameters*/
+#define LOOPBACK_PARTITION_TYPE			0x1
+
+/*Global partition parameters*/
+#define GLOBAL_PARTITION_TYPE			0xFF
+#define GLOBAL_PARTITION_HDR_SIZE		(4*1024)
+
+#define GLOBAL_REGION_OFFSET			(4*1024)
+#define GLOBAL_REGION_SIZE				(124*1024)
+
+#define GLOBAL_PARTITION_SIZE			(128*1024)
+#define GLOBAL_PARTITION_FLAGS			IPCMEM_FLAGS_ENABLE_RW_PROTECTION
+
+/*Debug partition parameters*/
+#define DEBUG_PARTITION_SIZE			(64*1024)
+
+const struct ipcmem_partition_header default_partition_hdr = {
+	DEFAULT_PARTITION_TYPE,
+	DEFAULT_DESCRIPTOR_OFFSET,
+	DEFAULT_DESCRIPTOR_SIZE,
+	DEFAULT_FIFO0_OFFSET,
+	DEFAULT_FIFO0_SIZE,
+	DEFAULT_FIFO1_OFFSET,
+	DEFAULT_FIFO1_SIZE,
+	DEFAULT_PARTITION_STATUS,
+};
+
+/* TX and RX FIFOs point to the same location for the loopback partition type
+ * (FIFO0 offset = FIFO1 offset)
+ */
+const struct ipcmem_partition_header loopback_partition_hdr = {
+	LOOPBACK_PARTITION_TYPE,
+	DEFAULT_DESCRIPTOR_OFFSET,
+	DEFAULT_DESCRIPTOR_SIZE,
+	DEFAULT_FIFO0_OFFSET,
+	DEFAULT_FIFO0_SIZE,
+	DEFAULT_FIFO0_OFFSET,
+	DEFAULT_FIFO0_SIZE,
+	DEFAULT_PARTITION_STATUS,
+};
+
+const struct global_partition_header global_partition_hdr = {
+	GLOBAL_PARTITION_TYPE,
+	GLOBAL_REGION_OFFSET,
+	GLOBAL_REGION_SIZE,
+};
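
Taken together, the default-partition constants describe a 32 KB channel partition: a 1 KB header, a 3 KB descriptor, and two 8 KB FIFOs, leaving 12 KB spare. A few compile-time checks make the layout explicit (a sketch only; the driver itself carries no such asserts):

```c
/* Layout implied by the default partition constants (illustrative):
 *
 *   0K     1K          4K         12K        20K              32K
 *   | hdr  | descriptor| fifo0    | fifo1    | unused ...     |
 */
_Static_assert(DEFAULT_DESCRIPTOR_OFFSET == DEFAULT_PARTITION_HDR_SIZE,
		"descriptor follows the 1K header");
_Static_assert(DEFAULT_FIFO0_OFFSET ==
		DEFAULT_DESCRIPTOR_OFFSET + DEFAULT_DESCRIPTOR_SIZE,
		"fifo0 follows the descriptor");
_Static_assert(DEFAULT_FIFO1_OFFSET ==
		DEFAULT_FIFO0_OFFSET + DEFAULT_FIFO0_SIZE,
		"fifo1 follows fifo0");
_Static_assert(DEFAULT_FIFO1_OFFSET + DEFAULT_FIFO1_SIZE <=
		DEFAULT_PARTITION_SIZE,
		"everything fits in the 32K partition");
```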

+ 205 - 0
qcom/opensource/synx-kernel/msm/synx/ipclite_client.h

@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __IPCLITE_CLIENT_H__
+#define __IPCLITE_CLIENT_H__
+
+typedef atomic_t ipclite_atomic_uint32_t;
+typedef atomic_t ipclite_atomic_int32_t;
+
+/**
+ * A list of hosts supported in IPCMEM
+ */
+enum ipcmem_host_type {
+	IPCMEM_APPS         =  0,                     /**< Apps Processor */
+	IPCMEM_MODEM        =  1,                     /**< Modem processor */
+	IPCMEM_LPASS        =  2,                     /**< Audio processor */
+	IPCMEM_SLPI         =  3,                     /**< Sensor processor */
+	IPCMEM_GPU          =  4,                     /**< Graphics processor */
+	IPCMEM_CDSP         =  5,                     /**< Compute DSP processor */
+	IPCMEM_CVP          =  6,                     /**< Computer Vision processor */
+	IPCMEM_CAM          =  7,                     /**< Camera processor */
+	IPCMEM_VPU          =  8,                     /**< Video processor */
+	IPCMEM_NUM_HOSTS    =  9,                     /**< Max number of hosts in target */
+
+	IPCMEM_GLOBAL_HOST  =  0xFE,                  /**< Global Host */
+	IPCMEM_INVALID_HOST =  0xFF,				  /**< Invalid processor */
+};
+
+struct global_region_info {
+	void *virt_base;
+	uint32_t size;
+};
+
+typedef int (*IPCLite_Client)(uint32_t proc_id, int64_t data, void *priv);
+
+/**
+ * ipclite_msg_send() - Sends message to remote client.
+ *
+ * @proc_id : Identifier for remote client or subsystem.
+ * @data    : 64 bit message value.
+ *
+ * @return Zero on successful send, negative on failure.
+ */
+int ipclite_msg_send(int32_t proc_id, uint64_t data);
+
+/**
+ * ipclite_register_client() - Registers client callback with framework.
+ *
+ * @cb_func_ptr : Client callback function to be called on message receive.
+ * @priv        : Private data required by client for handling callback.
+ *
+ * @return Zero on successful registration, negative on failure.
+ */
+int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv);
+
+/**
+ * ipclite_test_msg_send() - Sends message to remote client.
+ *
+ * @proc_id : Identifier for remote client or subsystem.
+ * @data    : 64 bit message value.
+ *
+ * @return Zero on successful send, negative on failure.
+ */
+int ipclite_test_msg_send(int32_t proc_id, uint64_t data);
+
+/**
+ * ipclite_register_test_client() - Registers client callback with framework.
+ *
+ * @cb_func_ptr : Client callback function to be called on message receive.
+ * @priv        : Private data required by client for handling callback.
+ *
+ * @return Zero on successful registration, negative on failure.
+ */
+int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
+
+/**
+ * get_global_partition_info() - Gets info about IPCMEM's global partitions.
+ *
+ * @global_ipcmem : Pointer to global_region_info structure.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int get_global_partition_info(struct global_region_info *global_ipcmem);
+
+/**
 * ipclite_recover() - Recovers IPCLite if any core undergoes SSR
 *
 * @core_id	: Core id of the core which underwent SSR.
+ *
+ * @return None.
+ */
+void ipclite_recover(enum ipcmem_host_type core_id);
+
+/**
+ * ipclite_hw_mutex_acquire() - Locks the hw mutex reserved for ipclite.
+ *
+ * @return Zero on successful acquire, negative on failure.
+ */
+int ipclite_hw_mutex_acquire(void);
+
+/**
+ * ipclite_hw_mutex_release() - Unlocks the hw mutex reserved for ipclite.
+ *
+ * @return Zero on successful release, negative on failure.
+ */
+int ipclite_hw_mutex_release(void);
+
+/**
+ * ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
+
+/**
+ * ipclite_atomic_init_i32() - Initializes the global memory with int32_t value.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data);
+
+/**
+ * ipclite_global_atomic_store_u32() - Writes uint32_t value to global memory.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
+
+/**
+ * ipclite_global_atomic_store_i32() - Writes int32_t value to global memory.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data);
+
+/**
+ * ipclite_global_atomic_load_u32() - Reads the value from global memory.
+ *
+ * @addr	: Pointer to global memory
+ *
+ * @return uint32_t value.
+ */
+uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr);
+
+/**
+ * ipclite_global_atomic_load_i32() - Reads the value from global memory.
+ *
+ * @addr	: Pointer to global memory
+ *
+ * @return int32_t value.
+ */
+int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr);
+
+/**
+ * ipclite_global_test_and_set_bit() - Sets a bit in global memory.
+ *
+ * @nr		: Bit position to set.
+ * @addr	: Pointer to global memory
+ *
+ * @return previous value.
+ */
+uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
+
+/**
+ * ipclite_global_test_and_clear_bit() - Clears a bit in global memory.
+ *
+ * @nr		: Bit position to clear.
+ * @addr	: Pointer to global memory
+ *
+ * @return previous value.
+ */
+uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
+
+/**
+ * ipclite_global_atomic_inc() - Increments an atomic variable by one.
+ *
+ * @addr	: Pointer to global memory
+ *
+ * @return previous value.
+ */
+int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr);
+
+/**
+ * ipclite_global_atomic_dec() - Decrements an atomic variable by one.
+ *
+ * @addr	: Pointer to global variable
+ *
+ * @return previous value.
+ */
+int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr);
+
+#endif
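
Putting the client API together: a consumer registers a receive callback and pushes 64-bit payloads at a remote host. A hedged in-kernel sketch built only on the declarations above (the module boilerplate and names are illustrative; note that the v0 implementation tracks a single registered client, which in practice is owned by synx):

```c
// SPDX-License-Identifier: GPL-2.0-only
/* Illustrative IPCLite client sketch; error handling trimmed for brevity. */
#include <linux/module.h>
#include "ipclite_client.h"

static int demo_cb(uint32_t proc_id, int64_t data, void *priv)
{
	pr_info("ipclite demo: msg 0x%llx from host %u\n",
		(unsigned long long)data, proc_id);
	return 0;
}

static int __init demo_init(void)
{
	int ret = ipclite_register_client(demo_cb, NULL);

	if (ret)
		return ret;

	/* fire an illustrative value at the compute DSP */
	return ipclite_msg_send(IPCMEM_CDSP, 0xDEADBEEF);
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");
```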

+ 2946 - 0
qcom/opensource/synx-kernel/msm/synx/synx.c

@@ -0,0 +1,2946 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/random.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "synx_debugfs.h"
+#include "synx_private.h"
+#include "synx_util.h"
+
+struct synx_device *synx_dev;
+static atomic64_t synx_counter = ATOMIC64_INIT(1);
+
+void synx_external_callback(s32 sync_obj, int status, void *data)
+{
+	struct synx_signal_cb *signal_cb = data;
+
+	if (IS_ERR_OR_NULL(signal_cb)) {
+		dprintk(SYNX_ERR,
+			"invalid payload from external obj %d [%d]\n",
+			sync_obj, status);
+		return;
+	}
+
+	signal_cb->status = status;
+	signal_cb->ext_sync_id = sync_obj;
+	signal_cb->flag = SYNX_SIGNAL_FROM_CALLBACK;
+
+	dprintk(SYNX_DBG,
+		"external callback from %d on handle %u\n",
+		sync_obj, signal_cb->handle);
+
+	/*
+	 * invoke the handler directly, as the external callback
+	 * is already invoked from a separate task;
+	 * this avoids creating yet another task.
+	 */
+	synx_signal_handler(&signal_cb->cb_dispatch);
+}
+EXPORT_SYMBOL(synx_external_callback);
+
+bool synx_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+const char *synx_fence_driver_name(struct dma_fence *fence)
+{
+	return "Global Synx driver";
+}
+
+void synx_fence_release(struct dma_fence *fence)
+{
+	/* release the memory allocated during create */
+	kfree(fence->lock);
+	kfree(fence);
+	dprintk(SYNX_MEM, "released backing fence %pK\n", fence);
+}
+EXPORT_SYMBOL(synx_fence_release);
+
+static struct dma_fence_ops synx_fence_ops = {
+	.wait = dma_fence_default_wait,
+	.enable_signaling = synx_fence_enable_signaling,
+	.get_driver_name = synx_fence_driver_name,
+	.get_timeline_name = synx_fence_driver_name,
+	.release = synx_fence_release,
+};
+
+static int synx_create_sync_fd(struct dma_fence *fence)
+{
+	int fd;
+	struct sync_file *sync_file;
+
+	if (IS_ERR_OR_NULL(fence))
+		return -SYNX_INVALID;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		return fd;
+
+	sync_file = sync_file_create(fence);
+	if (IS_ERR_OR_NULL(sync_file)) {
+		dprintk(SYNX_ERR, "error creating sync file\n");
+		goto err;
+	}
+
+	fd_install(fd, sync_file->file);
+	return fd;
+
+err:
+	put_unused_fd(fd);
+	return -SYNX_INVALID;
+}
+
+void *synx_get_fence(struct synx_session *session,
+	u32 h_synx)
+{
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct dma_fence *fence = NULL;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return NULL;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) ||
+		 IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	fence = synx_obj->fence;
+	/* obtain an additional reference to the fence */
+	dma_fence_get(fence);
+	mutex_unlock(&synx_obj->obj_lock);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return fence;
+}
+EXPORT_SYMBOL(synx_get_fence);
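
Since synx_get_fence() takes an extra dma_fence reference before returning, the caller owns that reference and must drop it with dma_fence_put() once done. A hedged usage sketch (waiting is just one thing a caller might do with the fence):

```c
/* Illustrative caller of synx_get_fence(): balance the reference taken
 * inside the API (dma_fence_get() above) with dma_fence_put().
 */
#include <linux/dma-fence.h>
#include "synx_api.h"	/* assumed to declare synx_get_fence() */

static void demo_wait_on_handle(struct synx_session *session, u32 h_synx)
{
	struct dma_fence *fence = synx_get_fence(session, h_synx);

	if (!fence)
		return;

	dma_fence_wait(fence, true);	/* interruptible wait */
	dma_fence_put(fence);		/* drop the extra reference */
}
```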
+
+static int synx_native_check_bind(struct synx_client *client,
+	struct synx_create_params *params)
+{
+	int rc;
+	u32 h_synx;
+	struct synx_entry_64 *ext_entry;
+	struct synx_map_entry *entry;
+
+	if (IS_ERR_OR_NULL(params->fence))
+		return -SYNX_INVALID;
+
+	ext_entry = synx_util_retrieve_data(params->fence,
+					synx_util_map_params_to_type(params->flags));
+	if (IS_ERR_OR_NULL(ext_entry))
+		return -SYNX_NOENT;
+
+	h_synx = ext_entry->data[0];
+	synx_util_remove_data(params->fence,
+		synx_util_map_params_to_type(params->flags));
+
+	entry = synx_util_get_map_entry(h_synx);
+	if (IS_ERR_OR_NULL(entry))
+		/* possible cleanup, retry to alloc new handle */
+		return -SYNX_NOENT;
+
+	rc = synx_util_init_handle(client, entry->synx_obj,
+			&h_synx, entry);
+	if (rc != SYNX_SUCCESS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] new handle init failed\n",
+			client->id);
+		goto fail;
+	}
+
+	*params->h_synx = h_synx;
+	return SYNX_SUCCESS;
+
+fail:
+	synx_util_release_map_entry(entry);
+	return rc;
+}
+
+static int synx_native_create_core(struct synx_client *client,
+	struct synx_create_params *params)
+{
+	int rc;
+	struct synx_coredata *synx_obj;
+	struct synx_map_entry *map_entry;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
+		IS_ERR_OR_NULL(params->h_synx))
+		return -SYNX_INVALID;
+
+	synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_NOMEM;
+
+	rc = synx_util_init_coredata(synx_obj, params,
+			&synx_fence_ops, client->dma_context);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] handle allocation failed\n",
+			client->id);
+		kfree(synx_obj);
+		goto fail;
+	}
+
+	map_entry = synx_util_insert_to_map(synx_obj,
+					*params->h_synx, 0);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		rc = PTR_ERR(map_entry);
+		synx_util_put_object(synx_obj);
+		goto fail;
+	}
+
+	rc = synx_util_add_callback(synx_obj, *params->h_synx);
+	if (rc != SYNX_SUCCESS) {
+		synx_util_release_map_entry(map_entry);
+		goto fail;
+	}
+
+	rc = synx_util_init_handle(client, synx_obj,
+			params->h_synx, map_entry);
+	if (rc < 0) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] unable to init new handle\n",
+			client->id);
+		synx_util_release_map_entry(map_entry);
+		goto fail;
+	}
+
+	dprintk(SYNX_MEM,
+		"[sess :%llu] allocated %u, core %pK, fence %pK\n",
+		client->id, *params->h_synx, synx_obj, synx_obj->fence);
+	return SYNX_SUCCESS;
+
+fail:
+	return rc;
+}
+
+int synx_create(struct synx_session *session,
+	struct synx_create_params *params)
+{
+	int rc = -SYNX_NOENT;
+	struct synx_client *client;
+	struct synx_external_desc_v2 ext_desc = {0};
+
+	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->h_synx) ||
+		params->flags > SYNX_CREATE_MAX_FLAGS) {
+		dprintk(SYNX_ERR, "invalid create arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	if (params->flags & SYNX_CREATE_DMA_FENCE) {
+		dprintk(SYNX_ERR,
+			"handle create with native fence not supported\n");
+		return -SYNX_NOSUPPORT;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	*params->h_synx = 0;
+
+	do {
+		/* create with external fence */
+		if (!IS_ERR_OR_NULL(params->fence))
+			rc = synx_native_check_bind(client, params);
+
+		if (rc == -SYNX_NOENT) {
+			rc = synx_native_create_core(client, params);
+			if (rc == SYNX_SUCCESS &&
+				 !IS_ERR_OR_NULL(params->fence)) {
+				/* save external fence details */
+				rc = synx_util_save_data(params->fence,
+					synx_util_map_params_to_type(params->flags),
+					*params->h_synx);
+				if (rc == -SYNX_ALREADY) {
+					/*
+					 * raced with create on same fence from
+					 * another client. clear the allocated
+					 * handle and retry.
+					 */
+					synx_native_release_core(client, *params->h_synx);
+					*params->h_synx = 0;
+					rc = -SYNX_NOENT;
+					continue;
+				} else if (rc != SYNX_SUCCESS) {
+					dprintk(SYNX_ERR,
+						"allocating handle failed=%d", rc);
+					synx_native_release_core(client, *params->h_synx);
+					break;
+				}
+
+				/* bind with external fence */
+				ext_desc.id = *((u32 *)params->fence);
+				ext_desc.type = synx_util_map_params_to_type(params->flags);
+				rc = synx_bind(session, *params->h_synx, ext_desc);
+				if (rc != SYNX_SUCCESS) {
+					dprintk(SYNX_ERR,
+						"[sess :%llu] bind external fence failed\n",
+						client->id);
+					synx_native_release_core(client, *params->h_synx);
+					goto fail;
+				}
+			}
+		}
+
+		if (rc == SYNX_SUCCESS)
+			dprintk(SYNX_VERB,
+				"[sess :%llu] handle allocated %u\n",
+				client->id, *params->h_synx);
+
+		break;
+	} while (true);
+
+fail:
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_create);
+
+int synx_native_signal_core(struct synx_coredata *synx_obj,
+	u32 status,
+	bool cb_signal,
+	u64 ext_sync_id)
+{
+	int rc = 0;
+	int ret;
+	u32 i = 0;
+	u32 idx = 0;
+	s32 sync_id;
+	u32 type;
+	void *data = NULL;
+	struct synx_bind_desc bind_descs[SYNX_MAX_NUM_BINDINGS];
+	struct bind_operations *bind_ops = NULL;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	synx_util_callback_dispatch(synx_obj, status);
+
+	/*
+	 * signal the external bound sync obj/s even if fence signal fails,
+	 * w/ error signal state (set above) to prevent deadlock
+	 */
+	if (synx_obj->num_bound_synxs > 0) {
+		memset(bind_descs, 0,
+			sizeof(struct synx_bind_desc) * SYNX_MAX_NUM_BINDINGS);
+		for (i = 0; i < synx_obj->num_bound_synxs; i++) {
+			/* signal invoked by external sync obj */
+			if (cb_signal &&
+				 (ext_sync_id ==
+				 synx_obj->bound_synxs[i].external_desc.id)) {
+				dprintk(SYNX_VERB,
+					"skipping signaling inbound sync: %llu\n",
+					ext_sync_id);
+				type = synx_obj->bound_synxs[i].external_desc.type;
+				memset(&synx_obj->bound_synxs[i], 0,
+					sizeof(struct synx_bind_desc));
+				/* clear the hash table entry */
+				synx_util_remove_data(&ext_sync_id, type);
+				continue;
+			}
+			memcpy(&bind_descs[idx++],
+				&synx_obj->bound_synxs[i],
+				sizeof(struct synx_bind_desc));
+			/* clear the memory, it's been backed up above */
+			memset(&synx_obj->bound_synxs[i], 0,
+				sizeof(struct synx_bind_desc));
+		}
+		synx_obj->num_bound_synxs = 0;
+	}
+
+	for (i = 0; i < idx; i++) {
+		sync_id = bind_descs[i].external_desc.id;
+		data = bind_descs[i].external_data;
+		type = bind_descs[i].external_desc.type;
+		bind_ops = synx_util_get_bind_ops(type);
+		if (IS_ERR_OR_NULL(bind_ops)) {
+			dprintk(SYNX_ERR,
+				"invalid bind ops for type: %u\n", type);
+			kfree(data);
+			continue;
+		}
+
+		/* clear the hash table entry */
+		synx_util_remove_data(&sync_id, type);
+
+		/*
+		 * we are already signaled, so don't want to
+		 * recursively be signaled
+		 */
+		ret = bind_ops->deregister_callback(
+				synx_external_callback, data, sync_id);
+		if (ret < 0) {
+			dprintk(SYNX_ERR,
+				"deregistration fail on %d, type: %u, err=%d\n",
+				sync_id, type, ret);
+			continue;
+		}
+		dprintk(SYNX_VERB,
+			"signal external sync: %d, type: %u, status: %u\n",
+			sync_id, type, status);
+		/* optional function to enable external signaling */
+		if (bind_ops->enable_signaling) {
+			ret = bind_ops->enable_signaling(sync_id);
+			if (ret < 0)
+				dprintk(SYNX_ERR,
+					"enabling fail on %d, type: %u, err=%d\n",
+					sync_id, type, ret);
+		}
+		ret = bind_ops->signal(sync_id, status);
+		if (ret < 0)
+			dprintk(SYNX_ERR,
+				"signaling fail on %d, type: %u, err=%d\n",
+				sync_id, type, ret);
+		/*
+		 * release the memory allocated for external data.
+		 * It is safe to release this memory as external cb
+		 * has been already deregistered before this.
+		 */
+		kfree(data);
+	}
+
+	return rc;
+}
+
+int synx_native_signal_fence(struct synx_coredata *synx_obj,
+	u32 status)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence))
+		return -SYNX_INVALID;
+
+	if (status <= SYNX_STATE_ACTIVE) {
+		dprintk(SYNX_ERR, "signaling with wrong status: %u\n",
+			status);
+		return -SYNX_INVALID;
+	}
+
+	if (synx_util_is_merged_object(synx_obj)) {
+		dprintk(SYNX_ERR, "signaling a composite handle\n");
+		return -SYNX_INVALID;
+	}
+
+	if (synx_util_get_object_status(synx_obj) !=
+		SYNX_STATE_ACTIVE)
+		return -SYNX_ALREADY;
+
+	if (IS_ERR_OR_NULL(synx_obj->signal_cb)) {
+		dprintk(SYNX_ERR, "signal cb in bad state\n");
+		return -SYNX_INVALID;
+	}
+
+	/*
+	 * remove registered callback for the fence
+	 * so it does not invoke the signal through callback again
+	 */
+	if (!dma_fence_remove_callback(synx_obj->fence,
+		&synx_obj->signal_cb->fence_cb)) {
+		dprintk(SYNX_ERR, "callback could not be removed\n");
+		return -SYNX_INVALID;
+	}
+
+	dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
+		synx_obj->signal_cb);
+	kfree(synx_obj->signal_cb);
+	synx_obj->signal_cb = NULL;
+
+	/* releasing reference held by signal cb */
+	synx_util_put_object(synx_obj);
+
+	spin_lock_irqsave(synx_obj->fence->lock, flags);
+	/* check the status again acquiring lock to avoid errors */
+	if (synx_util_get_object_status_locked(synx_obj) !=
+		SYNX_STATE_ACTIVE) {
+		spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+		return -SYNX_ALREADY;
+	}
+
+	synx_obj->status = status;
+
+	if (status >= SYNX_DMA_FENCE_STATE_MAX)
+		status = SYNX_DMA_FENCE_STATE_MAX - 1;
+
+	/* set fence error to model {signal w/ error} */
+	if (status != SYNX_STATE_SIGNALED_SUCCESS)
+		dma_fence_set_error(synx_obj->fence, -status);
+
+	rc = dma_fence_signal_locked(synx_obj->fence);
+	if (rc)
+		dprintk(SYNX_ERR,
+			"signaling fence %pK failed=%d\n",
+			synx_obj->fence, rc);
+	spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+
+	return rc;
+}
+
+int synx_native_signal_merged_fence(struct synx_coredata *synx_obj, u32 status)
+{
+	int rc = SYNX_SUCCESS;
+	unsigned long flags;
+	int i = 0, num_fences = 0;
+	struct synx_coredata **synx_child_obj = NULL;
+
+	rc = synx_get_child_coredata(synx_obj, &synx_child_obj, &num_fences);
+	if (rc != SYNX_SUCCESS)
+		return rc;
+	for (i = 0; i < num_fences; i++) {
+		if (IS_ERR_OR_NULL(synx_child_obj[i]) || IS_ERR_OR_NULL(synx_child_obj[i]->fence)) {
+			dprintk(SYNX_ERR, "Invalid child coredata %d\n", i);
+			rc = -SYNX_NOENT;
+			goto fail;
+		}
+
+		mutex_lock(&synx_child_obj[i]->obj_lock);
+		spin_lock_irqsave(synx_child_obj[i]->fence->lock, flags);
+		if (synx_util_get_object_status_locked(synx_child_obj[i]) != SYNX_STATE_ACTIVE ||
+			!synx_util_is_global_object(synx_child_obj[i])) {
+			spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags);
+			mutex_unlock(&synx_child_obj[i]->obj_lock);
+			continue;
+		}
+		spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags);
+
+		status = synx_global_get_status(synx_child_obj[i]->global_idx);
+		rc = synx_native_signal_fence(synx_child_obj[i], status);
+		mutex_unlock(&synx_child_obj[i]->obj_lock);
+	}
+fail:
+	kfree(synx_child_obj);
+	return rc;
+}
+
+u32 synx_get_child_status(struct synx_coredata *synx_obj)
+{
+	u32 h_child = 0, i = 0;
+	u32 status = SYNX_DMA_FENCE_STATE_MAX - 1, child_status = SYNX_STATE_ACTIVE;
+	struct dma_fence_array *array = NULL;
+	struct synx_map_entry *fence_entry = NULL;
+	struct synx_coredata *synx_child_obj = NULL;
+
+	if (!dma_fence_is_array(synx_obj->fence))
+		return status;
+
+	array = to_dma_fence_array(synx_obj->fence);
+	if (IS_ERR_OR_NULL(array))
+		goto bail;
+
+	for (i = 0; i < array->num_fences; i++) {
+		h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
+		if (h_child == 0)
+			h_child = synx_util_get_fence_entry((u64)array->fences[i], 0);
+
+		if (h_child == 0)
+			continue;
+
+		fence_entry = synx_util_get_map_entry(h_child);
+		if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj)) {
+			dprintk(SYNX_ERR, "Invalid handle access %u", h_child);
+			goto bail;
+		}
+		synx_child_obj = fence_entry->synx_obj;
+
+		mutex_lock(&synx_child_obj->obj_lock);
+		if (synx_util_is_global_object(synx_child_obj))
+			child_status = synx_global_get_status(synx_child_obj->global_idx);
+		else
+			child_status = synx_child_obj->status;
+		mutex_unlock(&synx_child_obj->obj_lock);
+		synx_util_release_map_entry(fence_entry);
+
+		dprintk(SYNX_VERB, "Child handle %u status %d", h_child, child_status);
+		if (child_status != SYNX_STATE_ACTIVE &&
+			(status == SYNX_DMA_FENCE_STATE_MAX - 1 ||
+			(child_status > SYNX_STATE_SIGNALED_SUCCESS &&
+			child_status <= SYNX_STATE_SIGNALED_MAX)))
+			status = child_status;
+	}
+bail:
+	return status;
+}
+
+u32 synx_custom_get_status(struct synx_coredata *synx_obj, u32 status)
+{
+	u32 custom_status = status;
+	u32 parent_global_status;
+
+	/* validate the handle before it is dereferenced for the global status */
+	if (IS_ERR_OR_NULL(synx_obj))
+		goto bail;
+
+	parent_global_status =
+		synx_util_is_global_object(synx_obj) ?
+		synx_global_get_status(synx_obj->global_idx) : SYNX_STATE_ACTIVE;
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_merged_object(synx_obj)) {
+		if (parent_global_status == SYNX_STATE_ACTIVE)
+			synx_obj->status = synx_get_child_status(synx_obj);
+		else
+			synx_obj->status = parent_global_status;
+		custom_status = synx_obj->status;
+	}
+
+	mutex_unlock(&synx_obj->obj_lock);
+
+bail:
+	return custom_status;
+}
+
+void synx_signal_handler(struct work_struct *cb_dispatch)
+{
+	int rc = SYNX_SUCCESS;
+	u32 idx;
+	struct synx_signal_cb *signal_cb =
+		container_of(cb_dispatch, struct synx_signal_cb, cb_dispatch);
+	struct synx_coredata *synx_obj = signal_cb->synx_obj;
+
+	u32 h_synx = signal_cb->handle;
+	u32 status = signal_cb->status;
+
+	if (signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) {
+		status = synx_custom_get_status(synx_obj, status);
+		dprintk(SYNX_VERB,
+			"handle %d will be updated with status %d\n",
+			h_synx, status);
+	}
+
+	if ((signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) &&
+			(synx_util_is_global_handle(h_synx) ||
+			synx_util_is_global_object(synx_obj))) {
+		idx = (IS_ERR_OR_NULL(synx_obj)) ?
+				synx_util_global_idx(h_synx) :
+				synx_obj->global_idx;
+		if (synx_global_get_status(idx) == SYNX_STATE_ACTIVE) {
+			rc = synx_global_update_status(idx, status);
+			if (rc != SYNX_SUCCESS)
+				dprintk(SYNX_ERR,
+					"global status update of %u failed=%d\n",
+					h_synx, rc);
+		}
+		/*
+		 * We decrement the reference here assuming this code runs after
+		 * the handle has been released. But if clients were to signal the
+		 * dma fence in the middle of the execution sequence, we would put
+		 * one reference and thereby delete the global idx. As of now,
+		 * clients cannot signal the dma fence.
+		 */
+		if (IS_ERR_OR_NULL(synx_obj))
+			synx_global_put_ref(idx);
+	}
+
+	/*
+	 * when invoked from external callback, possible for
+	 * all local clients to have released the handle coredata.
+	 */
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_WARN,
+			"handle %d has no local clients\n",
+			h_synx);
+		dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
+			signal_cb);
+		kfree(signal_cb);
+		return;
+	}
+
+	if (rc != SYNX_SUCCESS) {
+		dprintk(SYNX_ERR,
+			"global status update for %u failed=%d\n",
+			h_synx, rc);
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC &&
+		synx_util_get_object_status(synx_obj) == SYNX_STATE_ACTIVE) {
+		if (synx_util_is_merged_object(synx_obj))
+			rc = synx_native_signal_merged_fence(synx_obj, status);
+		else
+			rc = synx_native_signal_fence(synx_obj, status);
+	}
+
+	if (rc != SYNX_SUCCESS) {
+		mutex_unlock(&synx_obj->obj_lock);
+		dprintk(SYNX_ERR,
+			"failed to signal fence %u with err=%d\n",
+			h_synx, rc);
+		goto fail;
+	}
+
+	if (rc == SYNX_SUCCESS && synx_util_get_object_status(synx_obj)
+		!= SYNX_STATE_ACTIVE)
+		rc = synx_native_signal_core(synx_obj, status,
+			(signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ?
+			true : false, signal_cb->ext_sync_id);
+	mutex_unlock(&synx_obj->obj_lock);
+
+	if (rc != SYNX_SUCCESS)
+		dprintk(SYNX_ERR,
+			"internal signaling %u failed=%d",
+			h_synx, rc);
+
+fail:
+	/* release reference held by signal cb */
+	synx_util_put_object(synx_obj);
+	dprintk(SYNX_MEM, "signal cb destroyed %pK\n", signal_cb);
+	kfree(signal_cb);
+	dprintk(SYNX_VERB, "signal handle %u dispatch complete=%d",
+		h_synx, rc);
+}
+
+/* function would be called from atomic context */
+void synx_fence_callback(struct dma_fence *fence,
+	struct dma_fence_cb *cb)
+{
+	s32 status;
+	struct synx_signal_cb *signal_cb =
+		container_of(cb, struct synx_signal_cb, fence_cb);
+
+	dprintk(SYNX_DBG,
+		"callback from external fence %pK for handle %u\n",
+		fence, signal_cb->handle);
+
+	/* other signal_cb members would be set during cb registration */
+	status = dma_fence_get_status_locked(fence);
+
+	/*
+	 * dma_fence_get_status_locked API returns 1 if signaled,
+	 * 0 if ACTIVE,
+	 * and a negative error code on failure
+	 */
+	if (status == 1)
+		status = SYNX_STATE_SIGNALED_SUCCESS;
+	else if (status == -SYNX_STATE_SIGNALED_CANCEL)
+		status = SYNX_STATE_SIGNALED_CANCEL;
+	else if (status < 0 && status >= -SYNX_STATE_SIGNALED_MAX)
+		status = SYNX_STATE_SIGNALED_EXTERNAL;
+	else
+		status = (u32)-status;
+
+	signal_cb->status = status;
+
+	INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler);
+	queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch);
+}
+EXPORT_SYMBOL(synx_fence_callback);
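+
+/*
+ * Note on the translation above (the errno example is illustrative):
+ * dma_fence_get_status_locked() reports 1/0/-errno, which is collapsed
+ * into the synx state space. For a fence that failed with -EIO (-5),
+ * and assuming SYNX_STATE_SIGNALED_MAX is larger than 5, the handler
+ * reports SYNX_STATE_SIGNALED_EXTERNAL rather than the raw errno; only
+ * errors beyond -SYNX_STATE_SIGNALED_MAX pass through as custom states
+ * via the (u32)-status conversion.
+ */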
+
+static int synx_signal_offload_job(
+	struct synx_client *client,
+	struct synx_coredata *synx_obj,
+	u32 h_synx, u32 status)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_signal_cb *signal_cb;
+
+	signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(signal_cb)) {
+		rc = -SYNX_NOMEM;
+		goto fail;
+	}
+
+	/*
+	 * Since the signal will be queued to a separate thread, take an
+	 * additional reference so the synx coredata pointer remains
+	 * valid, avoiding any potential use-after-free.
+	 */
+	synx_util_get_object(synx_obj);
+
+	signal_cb->handle = h_synx;
+	signal_cb->status = status;
+	signal_cb->synx_obj = synx_obj;
+	signal_cb->flag = SYNX_SIGNAL_FROM_CLIENT;
+
+	dprintk(SYNX_VERB,
+		"[sess :%llu] signal work queued for %u\n",
+		client->id, h_synx);
+
+	INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler);
+	queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch);
+
+fail:
+	return rc;
+}
+
+int synx_signal(struct synx_session *session, u32 h_synx, u32 status)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data = NULL;
+	struct synx_coredata *synx_obj;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	if (status <= SYNX_STATE_ACTIVE ||
+			!(status == SYNX_STATE_SIGNALED_SUCCESS ||
+			status == SYNX_STATE_SIGNALED_CANCEL ||
+			status > SYNX_STATE_SIGNALED_MAX)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] signaling with wrong status: %u\n",
+			client->id, status);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) ||
+			IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_global_handle(h_synx) ||
+			synx_util_is_global_object(synx_obj))
+		rc = synx_global_update_status(
+				synx_obj->global_idx, status);
+
+	if (rc != SYNX_SUCCESS) {
+		mutex_unlock(&synx_obj->obj_lock);
+		dprintk(SYNX_ERR,
+			"[sess :%llu] status update of %u failed=%d\n",
+			client->id, h_synx, rc);
+		goto fail;
+	}
+
+	/*
+	 * Offload callback dispatch and external fence notification,
+	 * if any are registered, to a separate worker thread.
+	 */
+	if (synx_obj->num_bound_synxs ||
+			!list_empty(&synx_obj->reg_cbs_list))
+		rc = synx_signal_offload_job(client, synx_obj,
+				h_synx, status);
+
+	rc = synx_native_signal_fence(synx_obj, status);
+	if (rc != SYNX_SUCCESS)
+		dprintk(SYNX_ERR,
+			"[sess :%llu] signaling %u failed=%d\n",
+			client->id, h_synx, rc);
+	mutex_unlock(&synx_obj->obj_lock);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_signal);
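+
+/*
+ * Usage sketch (illustrative only; the session and handle variables are
+ * assumptions, not driver state): a producer that has finished its work
+ * item would typically signal the fence as
+ *
+ *   rc = synx_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);
+ *   if (rc != SYNX_SUCCESS)
+ *       pr_err("synx signal failed: %d\n", rc);
+ *
+ * Only SYNX_STATE_SIGNALED_SUCCESS, SYNX_STATE_SIGNALED_CANCEL or a
+ * custom status greater than SYNX_STATE_SIGNALED_MAX is accepted; any
+ * other value is rejected with -SYNX_INVALID.
+ */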
+
+static int synx_match_payload(struct synx_kernel_payload *cb_payload,
+	struct synx_kernel_payload *payload)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(cb_payload) || IS_ERR_OR_NULL(payload))
+		return -SYNX_INVALID;
+
+	if ((cb_payload->cb_func == payload->cb_func) &&
+			(cb_payload->data == payload->data)) {
+		if (payload->cancel_cb_func) {
+			cb_payload->cb_func =
+				payload->cancel_cb_func;
+			rc = 1;
+		} else {
+			rc = 2;
+			dprintk(SYNX_VERB,
+				"kernel cb de-registration success\n");
+		}
+	}
+
+	return rc;
+}
+
+/* timer callback, invoked when the registered timeout expires */
+void synx_timer_cb(struct timer_list *data)
+{
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct synx_cb_data *synx_cb = container_of(data, struct synx_cb_data, synx_timer);
+
+	client = synx_get_client(synx_cb->session);
+	if (IS_ERR_OR_NULL(client)) {
+		dprintk(SYNX_ERR,
+			"invalid session data %pK in cb payload\n",
+			synx_cb->session);
+		return;
+	}
+	synx_data = synx_util_acquire_handle(client, synx_cb->h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%pK] invalid handle access 0x%x\n",
+			synx_cb->session, synx_cb->h_synx);
+		return;
+	}
+	dprintk(SYNX_VERB,
+		"timer expired for synx_cb %pK timeout 0x%llx, deleting timer\n",
+		synx_cb, synx_cb->timeout);
+
+	synx_cb->status = SYNX_STATE_TIMEOUT;
+	del_timer(&synx_cb->synx_timer);
+	list_del_init(&synx_cb->node);
+	queue_work(synx_dev->wq_cb, &synx_cb->cb_dispatch);
+}
+
+static int synx_start_timer(struct synx_cb_data *synx_cb)
+{
+	int rc = 0;
+
+	timer_setup(&synx_cb->synx_timer, synx_timer_cb, 0);
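+	/*
+	 * mod_timer() returns 0 if the timer was inactive (the normal case
+	 * right after timer_setup()) and 1 if it re-armed an already
+	 * pending timer; both outcomes leave the timer scheduled.
+	 */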
+	rc = mod_timer(&synx_cb->synx_timer, jiffies + msecs_to_jiffies(synx_cb->timeout));
+	dprintk(SYNX_VERB,
+		"timer started for synx_cb %pK timeout 0x%llx\n",
+		synx_cb, synx_cb->timeout);
+	return rc;
+}
+
+
+int synx_async_wait(struct synx_session *session,
+	struct synx_callback_params *params)
+{
+	int rc = 0;
+	u32 idx;
+	u32 status;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct synx_cb_data *synx_cb;
+	struct synx_kernel_payload payload;
+
+	if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
+		return -SYNX_INVALID;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, params->h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, params->h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(synx_cb)) {
+		rc = -SYNX_NOMEM;
+		goto release;
+	}
+
+	payload.h_synx = params->h_synx;
+	payload.cb_func = params->cb_func;
+	payload.data = params->userdata;
+
+	/* allocate a free index from client cb table */
+	rc = synx_util_alloc_cb_entry(client, &payload, &idx);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] error allocating cb entry\n",
+			client->id);
+		kfree(synx_cb);
+		goto release;
+	}
+
+	if (synx_util_is_global_handle(params->h_synx) ||
+			synx_util_is_global_object(synx_obj)) {
+		status = synx_global_test_status_set_wait(
+					synx_util_global_idx(params->h_synx),
+					SYNX_CORE_APSS);
+		if (status != SYNX_STATE_ACTIVE) {
+			if (synx_util_is_merged_object(synx_obj))
+				synx_native_signal_merged_fence(synx_obj, status);
+			else
+				synx_native_signal_fence(synx_obj, status);
+		}
+	}
+
+	status = synx_util_get_object_status(synx_obj);
+
+	synx_cb->session = session;
+	synx_cb->idx = idx;
+	synx_cb->h_synx = params->h_synx;
+
+	INIT_WORK(&synx_cb->cb_dispatch, synx_util_cb_dispatch);
+
+	/* add callback if object still ACTIVE, dispatch if SIGNALED */
+	if (status == SYNX_STATE_ACTIVE) {
+		dprintk(SYNX_VERB,
+			"[sess :%llu] callback added for handle %u\n",
+			client->id, params->h_synx);
+		synx_cb->timeout = params->timeout_ms;
+		if (params->timeout_ms != SYNX_NO_TIMEOUT) {
+			rc = synx_start_timer(synx_cb);
+			if (rc != SYNX_SUCCESS) {
+				dprintk(SYNX_ERR,
+					"[sess :%llu] timer start failed - synx_cb: %pK, timeout_ms: 0x%llx, handle: 0x%x, ret: %d\n",
+					client->id, synx_cb, params->timeout_ms,
+					params->h_synx, rc);
+				/* cb not yet on any list, free it here */
+				kfree(synx_cb);
+				goto release;
+			}
+		}
+		list_add(&synx_cb->node, &synx_obj->reg_cbs_list);
+	} else {
+		synx_cb->status = status;
+		dprintk(SYNX_VERB,
+			"[sess :%llu] callback queued for handle %u\n",
+			client->id, params->h_synx);
+		queue_work(synx_dev->wq_cb,
+			&synx_cb->cb_dispatch);
+	}
+
+release:
+	mutex_unlock(&synx_obj->obj_lock);
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_async_wait);
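+
+/*
+ * Usage sketch (illustrative; the callback signature and variable names
+ * are assumptions based on struct synx_callback_params): registering an
+ * async wait with a 100 ms timeout might look like
+ *
+ *   static void my_cb(u32 h_synx, int status, void *data)
+ *   {
+ *       pr_info("handle %u signaled with status %d\n", h_synx, status);
+ *   }
+ *
+ *   struct synx_callback_params params = {
+ *       .h_synx     = h_synx,
+ *       .cb_func    = my_cb,
+ *       .userdata   = my_ctx,
+ *       .timeout_ms = 100,
+ *   };
+ *   rc = synx_async_wait(session, &params);
+ *
+ * If the object is already signaled the callback is dispatched right
+ * away on the synx workqueue; otherwise it is parked on reg_cbs_list
+ * and, for timeouts other than SYNX_NO_TIMEOUT, armed with a timer that
+ * fires the callback with SYNX_STATE_TIMEOUT on expiry.
+ */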
+
+int synx_cancel_async_wait(
+	struct synx_session *session,
+	struct synx_callback_params *params)
+{
+	int rc = 0, ret = 0;
+	u32 status;
+	bool match_found = false;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct synx_kernel_payload payload;
+	struct synx_cb_data *synx_cb, *synx_cb_temp;
+	struct synx_client_cb *cb_payload;
+
+	if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
+		return -SYNX_INVALID;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, params->h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, params->h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_external_object(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"cannot cancel wait on external fence\n");
+		goto release;
+	}
+
+	payload.h_synx = params->h_synx;
+	payload.cb_func = params->cb_func;
+	payload.data = params->userdata;
+	payload.cancel_cb_func = params->cancel_cb_func;
+
+	status = synx_util_get_object_status(synx_obj);
+	if (status != SYNX_STATE_ACTIVE) {
+		dprintk(SYNX_ERR,
+			"handle %u already signaled or timed out, cannot cancel\n",
+			params->h_synx);
+		rc = -SYNX_INVALID;
+		goto release;
+	}
+
+	status = SYNX_CALLBACK_RESULT_CANCELED;
+	/* remove all cb payloads matching the deregister call */
+	list_for_each_entry_safe(synx_cb, synx_cb_temp,
+			&synx_obj->reg_cbs_list, node) {
+		if (synx_cb->session != session) {
+			continue;
+		} else if (synx_cb->idx == 0 ||
+			synx_cb->idx >= SYNX_MAX_OBJS) {
+			/*
+			 * this should not happen. Even if it does,
+			 * the allocated memory will be cleaned up
+			 * when object is destroyed, preventing any
+			 * memory leaks.
+			 */
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid callback data\n",
+				client->id);
+			continue;
+		}
+
+		cb_payload = &client->cb_table[synx_cb->idx];
+		ret = synx_match_payload(&cb_payload->kernel_cb, &payload);
+		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
+			dprintk(SYNX_VERB,
+				"deleting timer for synx_cb %pK, timeout 0x%llx\n",
+				synx_cb, synx_cb->timeout);
+			del_timer(&synx_cb->synx_timer);
+		}
+		switch (ret) {
+		case 1:
+			/* queue the cancel cb work */
+			list_del_init(&synx_cb->node);
+			synx_cb->status = status;
+			queue_work(synx_dev->wq_cb,
+				&synx_cb->cb_dispatch);
+			match_found = true;
+			break;
+		case 2:
+			/* no cancellation cb */
+			if (synx_util_clear_cb_entry(client, cb_payload))
+				dprintk(SYNX_ERR,
+				"[sess :%llu] error clearing cb %u\n",
+				client->id, params->h_synx);
+			list_del_init(&synx_cb->node);
+			kfree(synx_cb);
+			match_found = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!match_found)
+		rc = -SYNX_INVALID;
+
+release:
+	mutex_unlock(&synx_obj->obj_lock);
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_cancel_async_wait);
+
+int synx_merge(struct synx_session *session,
+	struct synx_merge_params *params)
+{
+	int rc = SYNX_SUCCESS, i, num_signaled = 0;
+	u32 count = 0, h_child, status = SYNX_STATE_ACTIVE;
+	u32 *h_child_list = NULL, *h_child_idx_list = NULL;
+	struct synx_client *client;
+	struct dma_fence **fences = NULL;
+	struct synx_map_entry *map_entry;
+	struct synx_coredata *synx_obj, *synx_obj_child;
+	struct synx_handle_coredata *synx_data_child;
+
+	if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
+		return -SYNX_INVALID;
+
+	if (IS_ERR_OR_NULL(params->h_synxs) ||
+		IS_ERR_OR_NULL(params->h_merged_obj)) {
+		dprintk(SYNX_ERR, "invalid arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		rc = -SYNX_NOMEM;
+		goto fail;
+	}
+
+	rc = synx_util_validate_merge(client, params->h_synxs,
+			params->num_objs, &fences, &count);
+	if (rc < 0) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] merge validation failed\n",
+			client->id);
+		rc = -SYNX_INVALID;
+
+		kfree(synx_obj);
+		goto fail;
+	}
+
+	rc = synx_util_init_group_coredata(synx_obj, fences,
+			params, count, client->dma_context);
+	if (rc) {
+		dprintk(SYNX_ERR,
+		"[sess :%llu] error initializing merge handle\n",
+			client->id);
+		goto clean_up;
+	}
+
+	map_entry = synx_util_insert_to_map(synx_obj,
+					*params->h_merged_obj, 0);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		rc = PTR_ERR(map_entry);
+
+		/*
+		 * dma fence put will take care of removing the references taken
+		 * on child fences
+		 */
+		dma_fence_put(synx_obj->fence);
+		kfree(synx_obj);
+		goto fail;
+	}
+
+	rc = synx_util_add_callback(synx_obj, *params->h_merged_obj);
+
+	if (rc != SYNX_SUCCESS)
+		goto clean_up;
+
+	rc = synx_util_init_handle(client, synx_obj,
+			params->h_merged_obj, map_entry);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] unable to init merge handle %u\n",
+			client->id, *params->h_merged_obj);
+		goto clean_up;
+	}
+
+	h_child_list = kcalloc(count, sizeof(*h_child_list), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(h_child_list)) {
+		rc = -SYNX_NOMEM;
+		goto clear;
+	}
+
+	h_child_idx_list = kcalloc(count, sizeof(*h_child_idx_list), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(h_child_idx_list)) {
+		kfree(h_child_list);
+		rc = -SYNX_NOMEM;
+		goto clear;
+	}
+
+	for (i = 0; i < count; i++) {
+		h_child = synx_util_get_fence_entry((u64)fences[i], 1);
+		if (!synx_util_is_global_handle(h_child))
+			continue;
+
+		h_child_list[num_signaled] = h_child;
+		h_child_idx_list[num_signaled++] = synx_util_global_idx(h_child);
+	}
+
+	if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
+		rc = synx_global_merge(h_child_idx_list, num_signaled,
+			synx_util_global_idx(*params->h_merged_obj));
+		if (rc != SYNX_SUCCESS) {
+			dprintk(SYNX_ERR, "global merge failed\n");
+			kfree(h_child_list);
+			kfree(h_child_idx_list);
+			goto clear;
+		}
+	} else {
+		for (i = 0; i < num_signaled; i++) {
+			status = synx_global_test_status_set_wait(
+					synx_util_global_idx(h_child_list[i]),
+					SYNX_CORE_APSS);
+
+			if (status != SYNX_STATE_ACTIVE) {
+				synx_data_child = synx_util_acquire_handle(client, h_child_list[i]);
+				synx_obj_child = synx_util_obtain_object(synx_data_child);
+
+				if (IS_ERR_OR_NULL(synx_obj_child)) {
+					dprintk(SYNX_ERR,
+						"[sess :%llu] invalid child handle %u\n",
+						client->id, h_child_list[i]);
+					continue;
+				}
+				mutex_lock(&synx_obj_child->obj_lock);
+				if (synx_obj->status == SYNX_STATE_ACTIVE)
+					rc = synx_native_signal_fence(synx_obj_child, status);
+				mutex_unlock(&synx_obj_child->obj_lock);
+				if (rc != SYNX_SUCCESS)
+					dprintk(SYNX_ERR, "h_synx %u failed with status %d\n", h_child_list[i], rc);
+
+				synx_util_release_handle(synx_data_child);
+			}
+		}
+	}
+
+	dprintk(SYNX_MEM,
+		"[sess :%llu] merge allocated %u, core %pK, fence %pK\n",
+		client->id, *params->h_merged_obj, synx_obj,
+		synx_obj->fence);
+	kfree(h_child_list);
+	kfree(h_child_idx_list);
+	synx_put_client(client);
+	return SYNX_SUCCESS;
+clear:
+	synx_native_release_core(client, (*params->h_merged_obj));
+	synx_put_client(client);
+	return rc;
+
+clean_up:
+	/*
+	 * if map_entry was not created, the cleanup of child fences has
+	 * to be handled manually
+	 */
+	if (IS_ERR_OR_NULL(map_entry)) {
+		kfree(synx_obj);
+		synx_util_merge_error(client, params->h_synxs, count);
+		if (params->num_objs && params->num_objs <= count)
+			kfree(fences);
+
+	} else {
+		synx_util_release_map_entry(map_entry);
+	}
+fail:
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_merge);
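+
+/*
+ * Usage sketch (illustrative; variable names are assumptions): merging
+ * two handles into one composite handle that tracks both children:
+ *
+ *   u32 children[2] = { h_a, h_b };
+ *   u32 h_merged;
+ *   struct synx_merge_params params = {
+ *       .h_synxs      = children,
+ *       .num_objs     = 2,
+ *       .flags        = SYNX_MERGE_GLOBAL_FENCE,
+ *       .h_merged_obj = &h_merged,
+ *   };
+ *   rc = synx_merge(session, &params);
+ *
+ * With SYNX_MERGE_GLOBAL_FENCE the child global indices are folded into
+ * the merged object's global entry; without it, any child that is
+ * already signaled globally is propagated to its local fence here.
+ */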
+
+int synx_native_release_core(struct synx_client *client,
+	u32 h_synx)
+{
+	int rc = -SYNX_INVALID;
+	struct synx_handle_coredata *curr, *synx_handle = NULL;
+
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+			curr, node, h_synx) {
+		if (curr->key == h_synx &&
+			curr->rel_count != 0) {
+			curr->rel_count--;
+			synx_handle = curr;
+			rc = SYNX_SUCCESS;
+			break;
+		}
+	}
+	spin_unlock_bh(&client->handle_map_lock);
+
+	/* release the reference obtained at synx creation */
+	synx_util_release_handle(synx_handle);
+
+	return rc;
+}
+
+int synx_release(struct synx_session *session, u32 h_synx)
+{
+	int rc = 0;
+	struct synx_client *client;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	rc = synx_native_release_core(client, h_synx);
+
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_release);
+
+int synx_wait(struct synx_session *session,
+	u32 h_synx, u64 timeout_ms)
+{
+	int rc = 0;
+	unsigned long timeleft;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	if (synx_util_is_global_handle(h_synx)) {
+		rc = synx_global_test_status_set_wait(
+			synx_util_global_idx(h_synx), SYNX_CORE_APSS);
+		if (rc != SYNX_STATE_ACTIVE) {
+			mutex_lock(&synx_obj->obj_lock);
+			if (synx_util_is_merged_object(synx_obj))
+				synx_native_signal_merged_fence(synx_obj, rc);
+			else
+				synx_native_signal_fence(synx_obj, rc);
+			mutex_unlock(&synx_obj->obj_lock);
+		}
+	}
+
+	timeleft = dma_fence_wait_timeout(synx_obj->fence, false,
+					msecs_to_jiffies(timeout_ms));
+	if (timeleft <= 0) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] wait timeout for handle %u\n",
+			client->id, h_synx);
+		rc = -ETIMEDOUT;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	rc = synx_util_get_object_status(synx_obj);
+	mutex_unlock(&synx_obj->obj_lock);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_wait);
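+
+/*
+ * Usage sketch (illustrative): a blocking wait of up to 500 ms. The
+ * return value is the signaled object status on success, or -ETIMEDOUT
+ * if the fence stayed active for the whole window:
+ *
+ *   rc = synx_wait(session, h_synx, 500);
+ *   if (rc == -ETIMEDOUT)
+ *       pr_warn("fence %u not signaled in time\n", h_synx);
+ */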
+
+int synx_bind(struct synx_session *session,
+	u32 h_synx,
+	struct synx_external_desc_v2 external_sync)
+{
+	int rc = 0;
+	u32 i;
+	u32 bound_idx;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data = NULL;
+	struct synx_coredata *synx_obj;
+	struct synx_signal_cb *data = NULL;
+	struct bind_operations *bind_ops = NULL;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	bind_ops = synx_util_get_bind_ops(external_sync.type);
+	if (IS_ERR_OR_NULL(bind_ops)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid bind ops for %u\n",
+			client->id, external_sync.type);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_merged_object(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] cannot bind to composite handle %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto release;
+	}
+
+	if (synx_obj->num_bound_synxs >= SYNX_MAX_NUM_BINDINGS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] max bindings reached for handle %u\n",
+			client->id, h_synx);
+		rc = -SYNX_NOMEM;
+		goto release;
+	}
+
+	/* don't bind external sync obj if already done */
+	for (i = 0; i < synx_obj->num_bound_synxs; i++) {
+		if ((external_sync.id ==
+				synx_obj->bound_synxs[i].external_desc.id) &&
+				(external_sync.type ==
+				synx_obj->bound_synxs[i].external_desc.type)){
+			dprintk(SYNX_ERR,
+				"[sess :%llu] duplicate bind for sync %llu\n",
+				client->id, external_sync.id);
+			rc = -SYNX_ALREADY;
+			goto release;
+		}
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(data)) {
+		rc = -SYNX_NOMEM;
+		goto release;
+	}
+
+	/* get additional reference since passing pointer to cb */
+	synx_util_get_object(synx_obj);
+
+	/* data passed to external callback */
+	data->handle = h_synx;
+	data->synx_obj = synx_obj;
+
+	bound_idx = synx_obj->num_bound_synxs;
+	memcpy(&synx_obj->bound_synxs[bound_idx],
+		&external_sync, sizeof(struct synx_external_desc_v2));
+	synx_obj->bound_synxs[bound_idx].external_data = data;
+	synx_obj->num_bound_synxs++;
+	mutex_unlock(&synx_obj->obj_lock);
+
+	rc = bind_ops->register_callback(synx_external_callback,
+			data, external_sync.id);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] callback reg failed for %llu\n",
+			client->id, external_sync.id);
+		mutex_lock(&synx_obj->obj_lock);
+		memset(&synx_obj->bound_synxs[bound_idx], 0,
+			sizeof(struct synx_external_desc_v2));
+		synx_obj->num_bound_synxs--;
+		mutex_unlock(&synx_obj->obj_lock);
+		synx_util_put_object(synx_obj);
+		kfree(data);
+		goto fail;
+	}
+
+	synx_util_release_handle(synx_data);
+	dprintk(SYNX_DBG,
+		"[sess :%llu] ext sync %llu bound to handle %u\n",
+		client->id, external_sync.id, h_synx);
+	synx_put_client(client);
+	return SYNX_SUCCESS;
+
+release:
+	mutex_unlock(&synx_obj->obj_lock);
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_bind);
+
+int synx_get_status(struct synx_session *session,
+	u32 h_synx)
+{
+	int rc = 0, status = 0;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) ||
+		IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	status = synx_util_get_object_status(synx_obj);
+	rc = synx_obj->status;
+	mutex_unlock(&synx_obj->obj_lock);
+	dprintk(SYNX_VERB,
+		"[sess :%llu] handle %u synx coredata status %d and dma fence status %d\n",
+		client->id, h_synx, rc, status);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_get_status);
+
+static struct synx_map_entry *synx_handle_conversion(
+	struct synx_client *client,
+	u32 *h_synx, struct synx_map_entry *old_entry)
+{
+	int rc;
+	struct synx_map_entry *map_entry = NULL;
+	struct synx_coredata *synx_obj;
+
+	if (IS_ERR_OR_NULL(old_entry)) {
+		old_entry = synx_util_get_map_entry(*h_synx);
+		if (IS_ERR_OR_NULL(old_entry)) {
+			rc = PTR_ERR(old_entry);
+			dprintk(SYNX_ERR,
+				"invalid import handle %u err=%d",
+				*h_synx, rc);
+			return old_entry;
+		}
+	}
+
+	synx_obj = old_entry->synx_obj;
+	BUG_ON(synx_obj == NULL);
+
+	mutex_lock(&synx_obj->obj_lock);
+	synx_util_get_object(synx_obj);
+	if (synx_obj->global_idx != 0) {
+		*h_synx = synx_encode_handle(
+				synx_obj->global_idx, SYNX_CORE_APSS, true);
+
+		map_entry = synx_util_get_map_entry(*h_synx);
+		if (IS_ERR_OR_NULL(map_entry)) {
+			/* raced with release from last global client */
+			map_entry = synx_util_insert_to_map(synx_obj,
+						*h_synx, 0);
+			if (IS_ERR_OR_NULL(map_entry)) {
+				rc = PTR_ERR(map_entry);
+				dprintk(SYNX_ERR,
+					"addition of %u to map failed=%d",
+					*h_synx, rc);
+			}
+		}
+	} else {
+		synx_obj->map_count++;
+		rc = synx_alloc_global_handle(h_synx);
+		if (rc == SYNX_SUCCESS) {
+			synx_obj->global_idx =
+				synx_util_global_idx(*h_synx);
+			synx_obj->type |= SYNX_CREATE_GLOBAL_FENCE;
+
+			map_entry = synx_util_insert_to_map(synx_obj,
+						*h_synx, 0);
+			if (IS_ERR_OR_NULL(map_entry)) {
+				rc = PTR_ERR(map_entry);
+				synx_global_put_ref(
+					synx_util_global_idx(*h_synx));
+				dprintk(SYNX_ERR,
+					"insertion of %u to map failed=%d",
+					*h_synx, rc);
+			}
+		}
+	}
+	mutex_unlock(&synx_obj->obj_lock);
+
+	if (IS_ERR_OR_NULL(map_entry))
+		synx_util_put_object(synx_obj);
+
+	synx_util_release_map_entry(old_entry);
+	return map_entry;
+}
+
+static int synx_native_import_handle(struct synx_client *client,
+	struct synx_import_indv_params *params)
+{
+	int rc = SYNX_SUCCESS;
+	u32 h_synx, core_id;
+	struct synx_map_entry *map_entry, *old_entry;
+	struct synx_coredata *synx_obj;
+	struct synx_handle_coredata *synx_data = NULL, *curr;
+	char name[SYNX_OBJ_NAME_LEN] = {0};
+	struct synx_create_params c_params = {0};
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
+		IS_ERR_OR_NULL(params->fence) ||
+		IS_ERR_OR_NULL(params->new_h_synx))
+		return -SYNX_INVALID;
+
+	h_synx = *((u32 *)params->fence);
+
+	/* check if already mapped to client */
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+			curr, node, h_synx) {
+		if (curr->key == h_synx &&
+				curr->rel_count != 0 &&
+				(synx_util_is_global_handle(h_synx) ||
+				params->flags & SYNX_IMPORT_LOCAL_FENCE)) {
+			curr->rel_count++;
+			kref_get(&curr->refcount);
+			synx_data = curr;
+			break;
+		}
+	}
+	spin_unlock_bh(&client->handle_map_lock);
+
+	if (synx_data) {
+		*params->new_h_synx = h_synx;
+		return SYNX_SUCCESS;
+	}
+
+	map_entry = synx_util_get_map_entry(h_synx);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		core_id = (h_synx & SYNX_OBJ_CORE_ID_MASK)
+					>> SYNX_HANDLE_INDEX_BITS;
+		if (core_id == SYNX_CORE_APSS) {
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid import handle %u\n",
+				client->id, h_synx);
+			return -SYNX_INVALID;
+		} else if (synx_util_is_global_handle(h_synx)) {
+			/* import global handle created in another core */
+			synx_util_map_import_params_to_create(params, &c_params);
+			scnprintf(name, SYNX_OBJ_NAME_LEN, "import-client-%d",
+				current->pid);
+			c_params.name = name;
+			c_params.h_synx = &h_synx;
+
+			rc = synx_native_create_core(client, &c_params);
+			if (rc != SYNX_SUCCESS)
+				return rc;
+
+			*params->new_h_synx = h_synx;
+			return SYNX_SUCCESS;
+		}
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle %u\n",
+			client->id, h_synx);
+		return -SYNX_INVALID;
+	}
+
+	synx_obj = map_entry->synx_obj;
+	BUG_ON(synx_obj == NULL);
+
+	if ((params->flags & SYNX_IMPORT_GLOBAL_FENCE) &&
+		!synx_util_is_global_handle(h_synx)) {
+		old_entry = map_entry;
+		map_entry = synx_handle_conversion(client, &h_synx,
+						old_entry);
+	}
+
+	if (IS_ERR_OR_NULL(map_entry))
+		return -SYNX_INVALID;
+
+	*params->new_h_synx = h_synx;
+
+	rc = synx_util_init_handle(client, map_entry->synx_obj,
+		params->new_h_synx, map_entry);
+	if (rc != SYNX_SUCCESS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] init of imported handle %u failed=%d\n",
+			client->id, h_synx, rc);
+		synx_util_release_map_entry(map_entry);
+	}
+
+	return rc;
+}
+
+static int synx_native_import_fence(struct synx_client *client,
+	struct synx_import_indv_params *params)
+{
+	int rc = SYNX_SUCCESS;
+	u32 curr_h_synx;
+	u32 global;
+	struct synx_create_params c_params = {0};
+	char name[SYNX_OBJ_NAME_LEN] = {0};
+	struct synx_fence_entry *entry;
+	struct synx_map_entry *map_entry = NULL;
+	struct synx_handle_coredata *synx_data = NULL, *curr;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
+			IS_ERR_OR_NULL(params->fence) ||
+			IS_ERR_OR_NULL(params->new_h_synx))
+		return -SYNX_INVALID;
+
+	global = SYNX_IMPORT_GLOBAL_FENCE & params->flags;
+
+retry:
+	*params->new_h_synx =
+		synx_util_get_fence_entry((u64)params->fence, global);
+	if (*params->new_h_synx == 0) {
+		/* create a new synx obj and add to fence map */
+		synx_util_map_import_params_to_create(params, &c_params);
+		scnprintf(name, SYNX_OBJ_NAME_LEN, "import-client-%d",
+			current->pid);
+		c_params.name = name;
+		c_params.h_synx = params->new_h_synx;
+		c_params.fence = params->fence;
+
+		rc = synx_native_create_core(client, &c_params);
+		if (rc != SYNX_SUCCESS)
+			return rc;
+
+		curr_h_synx = *params->new_h_synx;
+
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(entry)) {
+			rc = -SYNX_NOMEM;
+			curr_h_synx = *c_params.h_synx;
+			goto fail;
+		}
+
+		do {
+			entry->key = (u64)params->fence;
+			if (global)
+				entry->g_handle = *params->new_h_synx;
+			else
+				entry->l_handle = *params->new_h_synx;
+
+			rc = synx_util_insert_fence_entry(entry,
+					params->new_h_synx, global);
+			if (rc == SYNX_SUCCESS) {
+				dprintk(SYNX_DBG,
+					"mapped fence %pK to new handle %u\n",
+					params->fence, *params->new_h_synx);
+				break;
+			} else if (rc == -SYNX_ALREADY) {
+				/*
+				 * release the new handle allocated
+				 * and use the available handle
+				 * already mapped instead.
+				 */
+				map_entry = synx_util_get_map_entry(
+								*params->new_h_synx);
+				if (IS_ERR_OR_NULL(map_entry)) {
+					/* race with fence release, need to retry */
+					dprintk(SYNX_DBG,
+						"re-attempting handle import\n");
+					*params->new_h_synx = curr_h_synx;
+					continue;
+				}
+
+				rc = synx_util_init_handle(client,
+						map_entry->synx_obj,
+						params->new_h_synx, map_entry);
+
+				dprintk(SYNX_DBG, "mapped fence %pK to handle %u\n",
+					params->fence, *params->new_h_synx);
+				goto release;
+			} else {
+				dprintk(SYNX_ERR,
+					"importing fence %pK failed, err=%d\n",
+					params->fence, rc);
+				goto release;
+			}
+		} while (true);
+	} else {
+		/* check if already mapped to client */
+		spin_lock_bh(&client->handle_map_lock);
+		hash_for_each_possible(client->handle_map,
+				curr, node, *params->new_h_synx) {
+			if (curr->key == *params->new_h_synx &&
+					curr->rel_count != 0) {
+				curr->rel_count++;
+				kref_get(&curr->refcount);
+				synx_data = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&client->handle_map_lock);
+
+		if (synx_data) {
+			dprintk(SYNX_DBG, "mapped fence %pK to handle %u\n",
+				params->fence, *params->new_h_synx);
+			return SYNX_SUCCESS;
+		}
+
+		if (global && !synx_util_is_global_handle(
+				*params->new_h_synx))
+			map_entry = synx_handle_conversion(client,
+				params->new_h_synx, NULL);
+		else
+			map_entry = synx_util_get_map_entry(
+						*params->new_h_synx);
+
+		if (IS_ERR_OR_NULL(map_entry)) {
+			/* race with fence release, need to retry */
+			dprintk(SYNX_DBG, "re-attempting handle import\n");
+			goto retry;
+		}
+
+		rc = synx_util_init_handle(client, map_entry->synx_obj,
+			params->new_h_synx, map_entry);
+
+		dprintk(SYNX_DBG, "mapped fence %pK to existing handle %u\n",
+			params->fence, *params->new_h_synx);
+	}
+
+	return rc;
+
+release:
+	kfree(entry);
+fail:
+	synx_native_release_core(client, curr_h_synx);
+	return rc;
+}
+
+static int synx_native_import_indv(struct synx_client *client,
+	struct synx_import_indv_params *params)
+{
+	int rc = -SYNX_INVALID;
+
+	if (IS_ERR_OR_NULL(params) ||
+		IS_ERR_OR_NULL(params->new_h_synx) ||
+		IS_ERR_OR_NULL(params->fence)) {
+		dprintk(SYNX_ERR, "invalid import arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	if (likely(params->flags & SYNX_IMPORT_DMA_FENCE))
+		rc = synx_native_import_fence(client, params);
+	else if (params->flags & SYNX_IMPORT_SYNX_FENCE)
+		rc = synx_native_import_handle(client, params);
+
+	dprintk(SYNX_DBG,
+		"[sess :%llu] import of fence %pK %s, handle %u\n",
+		client->id, params->fence,
+		rc ? "failed" : "successful",
+		rc ? 0 : *params->new_h_synx);
+
+	return rc;
+}
+
+static int synx_native_import_arr(struct synx_client *client,
+	struct synx_import_arr_params *params)
+{
+	u32 i;
+	int rc = SYNX_SUCCESS;
+
+	if (IS_ERR_OR_NULL(params) || params->num_fences == 0) {
+		dprintk(SYNX_ERR, "invalid import arr arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	for (i = 0; i < params->num_fences; i++) {
+		rc = synx_native_import_indv(client, &params->list[i]);
+		if (rc != SYNX_SUCCESS) {
+			dprintk(SYNX_ERR,
+				"importing fence[%u] %pK failed=%d\n",
+				i, params->list[i].fence, rc);
+			break;
+		}
+	}
+
+	if (rc != SYNX_SUCCESS)
+		while (i--) {
+			/* release the imported handles and cleanup */
+			if (synx_native_release_core(client,
+				*params->list[i].new_h_synx) != SYNX_SUCCESS)
+				dprintk(SYNX_ERR,
+					"error cleaning up imported handle[%u] %u\n",
+					i, *params->list[i].new_h_synx);
+		}
+
+	return rc;
+}
+
+int synx_import(struct synx_session *session,
+	struct synx_import_params *params)
+{
+	int rc = 0;
+	struct synx_client *client;
+
+	if (IS_ERR_OR_NULL(params)) {
+		dprintk(SYNX_ERR, "invalid import arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	/* import fence based on its type */
+	if (params->type == SYNX_IMPORT_ARR_PARAMS)
+		rc = synx_native_import_arr(client, &params->arr);
+	else
+		rc = synx_native_import_indv(client, &params->indv);
+
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_import);
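+
+/*
+ * Usage sketch (illustrative; variable names are assumptions): importing
+ * an external dma_fence as a global synx handle:
+ *
+ *   u32 h_imported;
+ *   struct synx_import_params params = { .type = SYNX_IMPORT_INDV_PARAMS };
+ *
+ *   params.indv.fence      = fence;   // struct dma_fence * held by caller
+ *   params.indv.flags      = SYNX_IMPORT_DMA_FENCE |
+ *                            SYNX_IMPORT_GLOBAL_FENCE;
+ *   params.indv.new_h_synx = &h_imported;
+ *   rc = synx_import(session, &params);
+ *
+ * Re-importing the same fence returns the handle already present in the
+ * fence map instead of allocating a new one.
+ */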
+
+static int synx_handle_create(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int result;
+	int csl_fence;
+	struct synx_create_v2 create_info;
+	struct synx_create_params params = {0};
+
+	if (k_ioctl->size != sizeof(create_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&create_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	params.h_synx = &create_info.synx_obj;
+	params.name = create_info.name;
+	params.flags = create_info.flags;
+	if (create_info.flags & SYNX_CREATE_CSL_FENCE) {
+		csl_fence = create_info.desc.id[0];
+		params.fence = &csl_fence;
+	}
+	result = synx_create(session, &params);
+
+	if (!result)
+		if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+				&create_info,
+				k_ioctl->size))
+			return -EFAULT;
+
+	return result;
+}
+
+static int synx_handle_getstatus(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_signal_v2 signal_info;
+
+	if (k_ioctl->size != sizeof(signal_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&signal_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	signal_info.synx_state =
+		synx_get_status(session, signal_info.synx_obj);
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&signal_info,
+			k_ioctl->size))
+		return -EFAULT;
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_handle_import(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_import_info import_info;
+	struct synx_import_params params = {0};
+	int result = SYNX_SUCCESS;
+
+	if (k_ioctl->size != sizeof(import_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&import_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	if (import_info.flags & SYNX_IMPORT_DMA_FENCE)
+		params.indv.fence =
+			sync_file_get_fence(import_info.desc.id[0]);
+	else if (import_info.flags & SYNX_IMPORT_SYNX_FENCE)
+		params.indv.fence = &import_info.synx_obj;
+
+	params.type = SYNX_IMPORT_INDV_PARAMS;
+	params.indv.flags = import_info.flags;
+	params.indv.new_h_synx = &import_info.new_synx_obj;
+
+	if (synx_import(session, &params))
+		result = -SYNX_INVALID;
+
+	/* fence needs to be put irrespective of import status */
+	if (import_info.flags & SYNX_IMPORT_DMA_FENCE)
+		dma_fence_put(params.indv.fence);
+
+	if (result != SYNX_SUCCESS)
+		return result;
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&import_info,
+			k_ioctl->size))
+		return -EFAULT;
+
+	return result;
+}
+
+static int synx_handle_import_arr(
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int rc = -SYNX_INVALID;
+	u32 idx = 0;
+	struct synx_client *client;
+	struct synx_import_arr_info arr_info;
+	struct synx_import_info *arr;
+	struct synx_import_indv_params params = {0};
+
+	if (k_ioctl->size != sizeof(arr_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&arr_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	arr = kcalloc(arr_info.num_objs,
+				sizeof(*arr), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(arr))
+		return -ENOMEM;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client)) {
+		rc = -SYNX_INVALID;
+		goto clean;
+	}
+
+	if (copy_from_user(arr,
+			u64_to_user_ptr(arr_info.list),
+			sizeof(*arr) * arr_info.num_objs)) {
+		rc = -EFAULT;
+		goto fail;
+	}
+
+	while (idx < arr_info.num_objs) {
+		params.new_h_synx = &arr[idx].new_synx_obj;
+		params.flags = arr[idx].flags;
+
+		if (arr[idx].flags & SYNX_IMPORT_DMA_FENCE)
+			params.fence =
+				sync_file_get_fence(arr[idx].desc.id[0]);
+		else if (arr[idx].flags & SYNX_IMPORT_SYNX_FENCE)
+			params.fence = &arr[idx].synx_obj;
+
+		rc = synx_native_import_indv(client, &params);
+
+		/* fence needs to be put irrespective of import status */
+		if (arr[idx].flags & SYNX_IMPORT_DMA_FENCE)
+			dma_fence_put(params.fence);
+
+		if (rc != SYNX_SUCCESS)
+			break;
+		idx++;
+	}
+
+	/* release allocated handles in case of failure */
+	if (rc != SYNX_SUCCESS) {
+		while (idx > 0)
+			synx_native_release_core(client,
+				arr[--idx].new_synx_obj);
+	} else {
+		if (copy_to_user(u64_to_user_ptr(arr_info.list),
+			arr,
+			sizeof(*arr) * arr_info.num_objs)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+	}
+
+fail:
+	synx_put_client(client);
+clean:
+	kfree(arr);
+	return rc;
+}
+
+static int synx_handle_export(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	return -SYNX_INVALID;
+}
+
+static int synx_handle_signal(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_signal_v2 signal_info;
+
+	if (k_ioctl->size != sizeof(signal_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&signal_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	return synx_signal(session, signal_info.synx_obj,
+		signal_info.synx_state);
+}
+
+static int synx_handle_merge(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	u32 *h_synxs;
+	int result;
+	struct synx_merge_v2 merge_info;
+	struct synx_merge_params params = {0};
+
+	if (k_ioctl->size != sizeof(merge_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&merge_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	if (merge_info.num_objs == 0 ||
+			merge_info.num_objs >= SYNX_MAX_OBJS)
+		return -SYNX_INVALID;
+
+	h_synxs = kcalloc(merge_info.num_objs,
+				sizeof(*h_synxs), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(h_synxs))
+		return -ENOMEM;
+
+	if (copy_from_user(h_synxs,
+			u64_to_user_ptr(merge_info.synx_objs),
+			sizeof(u32) * merge_info.num_objs)) {
+		kfree(h_synxs);
+		return -EFAULT;
+	}
+
+	params.num_objs = merge_info.num_objs;
+	params.h_synxs = h_synxs;
+	params.flags = merge_info.flags;
+	params.h_merged_obj = &merge_info.merged;
+
+	result = synx_merge(session, &params);
+	if (!result) {
+		if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+				&merge_info,
+				k_ioctl->size)) {
+			kfree(h_synxs);
+			return -EFAULT;
+		}
+	}
+
+	kfree(h_synxs);
+	return result;
+}
+
+static int synx_handle_wait(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_wait_v2 wait_info;
+
+	if (k_ioctl->size != sizeof(wait_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&wait_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	k_ioctl->result = synx_wait(session,
+		wait_info.synx_obj, wait_info.timeout_ms);
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_handle_async_wait(
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int rc = 0;
+	struct synx_userpayload_info_v2 user_data;
+	struct synx_callback_params params = {0};
+
+	if (k_ioctl->size != sizeof(user_data))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&user_data,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	params.h_synx = user_data.synx_obj;
+	params.cb_func = synx_util_default_user_callback;
+	params.userdata = (void *)user_data.payload[0];
+	params.timeout_ms = user_data.payload[2];
+
+	rc = synx_async_wait(session, &params);
+	if (rc)
+		dprintk(SYNX_ERR,
+			"user cb registration failed for handle %d\n",
+			user_data.synx_obj);
+
+	return rc;
+}
+
+static int synx_handle_cancel_async_wait(
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int rc = 0;
+	struct synx_userpayload_info_v2 user_data;
+	struct synx_callback_params params = {0};
+
+	if (k_ioctl->size != sizeof(user_data))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&user_data,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	params.h_synx = user_data.synx_obj;
+	params.cb_func = synx_util_default_user_callback;
+	params.userdata = (void *)user_data.payload[0];
+
+	rc = synx_cancel_async_wait(session, &params);
+	if (rc)
+		dprintk(SYNX_ERR,
+			"user cb deregistration failed for handle %d\n",
+			user_data.synx_obj);
+
+	return rc;
+}
+
+static int synx_handle_bind(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_bind_v2 synx_bind_info;
+
+	if (k_ioctl->size != sizeof(synx_bind_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&synx_bind_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	k_ioctl->result = synx_bind(session,
+		synx_bind_info.synx_obj,
+		synx_bind_info.ext_sync_desc);
+
+	return k_ioctl->result;
+}
+
+static int synx_handle_release(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_info release_info;
+
+	if (k_ioctl->size != sizeof(release_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&release_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	return synx_release(session, release_info.synx_obj);
+}
+
+static int synx_handle_get_fence(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_fence_fd fence_fd;
+	struct dma_fence *fence;
+
+	if (k_ioctl->size != sizeof(fence_fd))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&fence_fd,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	fence = synx_get_fence(session, fence_fd.synx_obj);
+	if (IS_ERR_OR_NULL(fence))
+		return -SYNX_INVALID;
+
+	fence_fd.fd = synx_create_sync_fd(fence);
+	/*
+	 * release additional reference taken in synx_get_fence.
+	 * additional reference ensures the fence is valid and
+	 * does not race with handle/fence release.
+	 */
+	dma_fence_put(fence);
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&fence_fd, k_ioctl->size))
+		return -EFAULT;
+
+	return SYNX_SUCCESS;
+}
+
+static long synx_ioctl(struct file *filep,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	s32 rc = 0;
+	struct synx_private_ioctl_arg k_ioctl;
+	struct synx_session *session = filep->private_data;
+
+	if (cmd != SYNX_PRIVATE_IOCTL_CMD) {
+		dprintk(SYNX_ERR, "invalid ioctl cmd\n");
+		return -ENOIOCTLCMD;
+	}
+
+	if (copy_from_user(&k_ioctl,
+			(void __user *)arg,
+			sizeof(k_ioctl))) {
+		dprintk(SYNX_ERR, "invalid ioctl args\n");
+		return -EFAULT;
+	}
+
+	if (!k_ioctl.ioctl_ptr)
+		return -SYNX_INVALID;
+
+	dprintk(SYNX_VERB, "[sess :%llu] Enter cmd %u from pid %d\n",
+		((struct synx_client *)session)->id,
+		k_ioctl.id, current->pid);
+
+	switch (k_ioctl.id) {
+	case SYNX_CREATE:
+		rc = synx_handle_create(&k_ioctl, session);
+		break;
+	case SYNX_RELEASE:
+		rc = synx_handle_release(&k_ioctl, session);
+		break;
+	case SYNX_REGISTER_PAYLOAD:
+		rc = synx_handle_async_wait(&k_ioctl,
+				session);
+		break;
+	case SYNX_DEREGISTER_PAYLOAD:
+		rc = synx_handle_cancel_async_wait(&k_ioctl,
+				session);
+		break;
+	case SYNX_SIGNAL:
+		rc = synx_handle_signal(&k_ioctl, session);
+		break;
+	case SYNX_MERGE:
+		rc = synx_handle_merge(&k_ioctl, session);
+		break;
+	case SYNX_WAIT:
+		rc = synx_handle_wait(&k_ioctl, session);
+		if (copy_to_user((void __user *)arg,
+			&k_ioctl,
+			sizeof(k_ioctl))) {
+			dprintk(SYNX_ERR, "invalid ioctl args\n");
+			rc = -EFAULT;
+		}
+		break;
+	case SYNX_BIND:
+		rc = synx_handle_bind(&k_ioctl, session);
+		break;
+	case SYNX_GETSTATUS:
+		rc = synx_handle_getstatus(&k_ioctl, session);
+		break;
+	case SYNX_IMPORT:
+		rc = synx_handle_import(&k_ioctl, session);
+		break;
+	case SYNX_IMPORT_ARR:
+		rc = synx_handle_import_arr(&k_ioctl, session);
+		break;
+	case SYNX_EXPORT:
+		rc = synx_handle_export(&k_ioctl, session);
+		break;
+	case SYNX_GETFENCE_FD:
+		rc = synx_handle_get_fence(&k_ioctl, session);
+		break;
+	default:
+		rc = -SYNX_INVALID;
+	}
+
+	dprintk(SYNX_VERB, "[sess :%llu] exit with status %d\n",
+		((struct synx_client *)session)->id, rc);
+
+	return rc;
+}
+
+static ssize_t synx_read(struct file *filep,
+	char __user *buf, size_t size, loff_t *f_pos)
+{
+	ssize_t rc = 0;
+	struct synx_client *client = NULL;
+	struct synx_client_cb *cb;
+	struct synx_session *session = filep->private_data;
+	struct synx_userpayload_info_v2 data;
+
+	if (size != sizeof(struct synx_userpayload_info_v2)) {
+		dprintk(SYNX_ERR, "invalid read size\n");
+		return -SYNX_INVALID;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	mutex_lock(&client->event_q_lock);
+	cb = list_first_entry_or_null(&client->event_q,
+			struct synx_client_cb, node);
+	if (IS_ERR_OR_NULL(cb)) {
+		mutex_unlock(&client->event_q_lock);
+		rc = 0;
+		goto fail;
+	}
+
+	if (cb->idx == 0 || cb->idx >= SYNX_MAX_OBJS) {
+		dprintk(SYNX_ERR, "invalid index\n");
+		mutex_unlock(&client->event_q_lock);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	list_del_init(&cb->node);
+	mutex_unlock(&client->event_q_lock);
+	memset(&data, 0, sizeof(struct synx_userpayload_info_v2));
+
+	rc = size;
+	data.synx_obj = cb->kernel_cb.h_synx;
+	data.reserved = cb->kernel_cb.status;
+	data.payload[0] = (u64)cb->kernel_cb.data;
+	if (copy_to_user(buf,
+			&data,
+			sizeof(struct synx_userpayload_info_v2))) {
+		dprintk(SYNX_ERR, "couldn't copy user callback data\n");
+		rc = -EFAULT;
+	}
+
+	if (synx_util_clear_cb_entry(client, cb))
+		dprintk(SYNX_ERR,
+			"[sess :%llu] error clearing cb for handle %u\n",
+			client->id, data.synx_obj);
+fail:
+	synx_put_client(client);
+	return rc;
+}
+
+static unsigned int synx_poll(struct file *filep,
+	struct poll_table_struct *poll_table)
+{
+	int rc = 0;
+	struct synx_client *client;
+	struct synx_session *session = filep->private_data;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client)) {
+		dprintk(SYNX_ERR, "invalid session in poll\n");
+		return SYNX_SUCCESS;
+	}
+
+	poll_wait(filep, &client->event_wq, poll_table);
+	mutex_lock(&client->event_q_lock);
+	if (!list_empty(&client->event_q))
+		rc = POLLPRI;
+	mutex_unlock(&client->event_q_lock);
+
+	synx_put_client(client);
+	return rc;
+}
+
+struct synx_session *synx_initialize(
+	struct synx_initialization_params *params)
+{
+	struct synx_client *client;
+
+	if (IS_ERR_OR_NULL(params))
+		return ERR_PTR(-SYNX_INVALID);
+
+	client = vzalloc(sizeof(*client));
+	if (IS_ERR_OR_NULL(client))
+		return ERR_PTR(-SYNX_NOMEM);
+
+	if (params->name)
+		strlcpy(client->name, params->name, sizeof(client->name));
+
+	client->active = true;
+	client->dma_context = dma_fence_context_alloc(1);
+	client->id = atomic64_inc_return(&synx_counter);
+	kref_init(&client->refcount);
+	spin_lock_init(&client->handle_map_lock);
+	mutex_init(&client->event_q_lock);
+	INIT_LIST_HEAD(&client->event_q);
+	init_waitqueue_head(&client->event_wq);
+	/* zero idx not allowed */
+	set_bit(0, client->cb_bitmap);
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	hash_add(synx_dev->native->client_metadata_map,
+		&client->node, (u64)client);
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+
+	dprintk(SYNX_INFO, "[sess :%llu] session created %s\n",
+		client->id, params->name);
+
+	return (struct synx_session *)client;
+}
+EXPORT_SYMBOL(synx_initialize);
+
+int synx_uninitialize(struct synx_session *session)
+{
+	struct synx_client *client = NULL, *curr;
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	hash_for_each_possible(synx_dev->native->client_metadata_map,
+			curr, node, (u64)session) {
+		if (curr == (struct synx_client *)session) {
+			if (curr->active) {
+				curr->active = false;
+				client = curr;
+			}
+			break;
+		}
+	}
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+
+	/* release the reference obtained at synx init */
+	synx_put_client(client);
+	return SYNX_SUCCESS;
+}
+EXPORT_SYMBOL(synx_uninitialize);
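+
+/*
+ * Usage sketch (illustrative): a kernel client opens a session before any
+ * other synx call and tears it down when done:
+ *
+ *   struct synx_initialization_params params = {
+ *       .name = "my-client",
+ *       .id   = SYNX_CLIENT_NATIVE,
+ *   };
+ *   struct synx_session *session = synx_initialize(&params);
+ *
+ *   if (IS_ERR_OR_NULL(session))
+ *       return PTR_ERR(session);
+ *   ...
+ *   synx_uninitialize(session);
+ */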
+
+static int synx_open(struct inode *inode, struct file *filep)
+{
+	int rc = 0;
+	char name[SYNX_OBJ_NAME_LEN];
+	struct synx_initialization_params params = {0};
+
+	dprintk(SYNX_VERB, "Enter pid: %d\n", current->pid);
+
+	scnprintf(name, SYNX_OBJ_NAME_LEN, "umd-client-%d", current->pid);
+	params.name = name;
+	params.id = SYNX_CLIENT_NATIVE;
+
+	filep->private_data = synx_initialize(&params);
+	if (IS_ERR_OR_NULL(filep->private_data)) {
+		dprintk(SYNX_ERR, "session allocation failed for pid: %d\n",
+			current->pid);
+		rc = PTR_ERR(filep->private_data);
+	} else {
+		dprintk(SYNX_VERB, "allocated new session for pid: %d\n",
+			current->pid);
+	}
+
+	return rc;
+}
+
+static int synx_close(struct inode *inode, struct file *filep)
+{
+	struct synx_session *session = filep->private_data;
+
+	return synx_uninitialize(session);
+}
+
+static const struct file_operations synx_fops = {
+	.owner = THIS_MODULE,
+	.open  = synx_open,
+	.read  = synx_read,
+	.release = synx_close,
+	.poll  = synx_poll,
+	.unlocked_ioctl = synx_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = synx_ioctl,
+#endif
+};
+
+int synx_register_ops(
+	const struct synx_register_params *params)
+{
+	s32 rc = 0;
+	struct synx_registered_ops *client_ops;
+
+	if (!synx_dev || !params || !params->name ||
+		 !synx_util_is_valid_bind_type(params->type) ||
+		 !params->ops.register_callback ||
+		 !params->ops.deregister_callback ||
+		 !params->ops.signal) {
+		dprintk(SYNX_ERR, "invalid register params\n");
+		return -SYNX_INVALID;
+	}
+
+	mutex_lock(&synx_dev->vtbl_lock);
+	client_ops = &synx_dev->bind_vtbl[params->type];
+	if (!client_ops->valid) {
+		client_ops->valid = true;
+		memcpy(&client_ops->ops, &params->ops,
+			sizeof(client_ops->ops));
+		strlcpy(client_ops->name, params->name,
+			sizeof(client_ops->name));
+		client_ops->type = params->type;
+		dprintk(SYNX_INFO,
+			"registered bind ops type %u for %s\n",
+			params->type, params->name);
+	} else {
+		dprintk(SYNX_WARN,
+			"client already registered for type %u by %s\n",
+			client_ops->type, client_ops->name);
+		rc = -SYNX_ALREADY;
+	}
+	mutex_unlock(&synx_dev->vtbl_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(synx_register_ops);
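+
+/*
+ * Usage sketch (illustrative; the bind type and callbacks are assumptions,
+ * not part of this driver): an external fence driver registers its bind
+ * operations once at probe time:
+ *
+ *   struct synx_register_params params = {
+ *       .name = "my-ext-fence",
+ *       .type = SYNX_TYPE_CSL,              // assumed bind type
+ *       .ops  = {
+ *           .register_callback   = my_register_cb,
+ *           .deregister_callback = my_deregister_cb,
+ *           .signal              = my_signal,
+ *       },
+ *   };
+ *   rc = synx_register_ops(&params);
+ *
+ * A second registration for the same type fails with -SYNX_ALREADY until
+ * the first owner calls synx_deregister_ops().
+ */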
+
+int synx_deregister_ops(
+	const struct synx_register_params *params)
+{
+	struct synx_registered_ops *client_ops;
+
+	if (IS_ERR_OR_NULL(params) || !params->name ||
+		!synx_util_is_valid_bind_type(params->type)) {
+		dprintk(SYNX_ERR, "invalid params\n");
+		return -SYNX_INVALID;
+	}
+
+	mutex_lock(&synx_dev->vtbl_lock);
+	client_ops = &synx_dev->bind_vtbl[params->type];
+	memset(client_ops, 0, sizeof(*client_ops));
+	dprintk(SYNX_INFO, "deregistered bind ops for %s\n",
+		params->name);
+	mutex_unlock(&synx_dev->vtbl_lock);
+
+	return SYNX_SUCCESS;
+}
+EXPORT_SYMBOL(synx_deregister_ops);
+
+void synx_ipc_handler(struct work_struct *cb_dispatch)
+{
+	struct synx_signal_cb *signal_cb =
+		container_of(cb_dispatch, struct synx_signal_cb, cb_dispatch);
+	struct synx_map_entry *map_entry;
+
+	map_entry = synx_util_get_map_entry(signal_cb->handle);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		dprintk(SYNX_WARN,
+			"no clients to notify for %u\n",
+			signal_cb->handle);
+		dprintk(SYNX_MEM, "signal cb destroyed %pK\n", signal_cb);
+		kfree(signal_cb);
+		return;
+	}
+
+	/* get reference on synx coredata for signal cb */
+	synx_util_get_object(map_entry->synx_obj);
+	signal_cb->synx_obj = map_entry->synx_obj;
+	synx_util_release_map_entry(map_entry);
+	synx_signal_handler(&signal_cb->cb_dispatch);
+}
+
+int synx_ipc_callback(u32 client_id,
+	s64 data, void *priv)
+{
+	struct synx_signal_cb *signal_cb;
+	u32 status = (u32)data;
+	u32 handle = (u32)(data >> 32);
+
+	signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(signal_cb))
+		return -SYNX_NOMEM;
+
+	dprintk(SYNX_DBG,
+		"signal notification for %u received with status %u\n",
+		handle, status);
+
+	signal_cb->status = status;
+	signal_cb->handle = handle;
+	signal_cb->flag = SYNX_SIGNAL_FROM_IPC;
+
+	INIT_WORK(&signal_cb->cb_dispatch, synx_ipc_handler);
+	queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch);
+
+	return SYNX_SUCCESS;
+}
+EXPORT_SYMBOL(synx_ipc_callback);
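+
+/*
+ * The 64-bit IPC payload packs the handle in the upper word and the
+ * signaling status in the lower word; a peer core would encode it as
+ * (sketch, mirroring the decode above):
+ *
+ *   s64 data = ((u64)handle << 32) | (u64)status;
+ */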
+
+int synx_recover(enum synx_client_id id)
+{
+	u32 core_id;
+
+	core_id = synx_util_map_client_id_to_core(id);
+	if (core_id >= SYNX_CORE_MAX) {
+		dprintk(SYNX_ERR, "invalid client id %u\n", id);
+		return -SYNX_INVALID;
+	}
+
+	switch (core_id) {
+	case SYNX_CORE_EVA:
+	case SYNX_CORE_IRIS:
+	case SYNX_CORE_ICP:
+		break;
+	default:
+		dprintk(SYNX_ERR, "recovery not supported on %u\n", id);
+		return -SYNX_NOSUPPORT;
+	}
+
+	return synx_global_recover(core_id);
+}
+EXPORT_SYMBOL(synx_recover);
+
+static int synx_local_mem_init(void)
+{
+	if (!synx_dev->native)
+		return -SYNX_INVALID;
+
+	hash_init(synx_dev->native->client_metadata_map);
+	hash_init(synx_dev->native->fence_map);
+	hash_init(synx_dev->native->global_map);
+	hash_init(synx_dev->native->local_map);
+	hash_init(synx_dev->native->csl_fence_map);
+
+	spin_lock_init(&synx_dev->native->metadata_map_lock);
+	spin_lock_init(&synx_dev->native->fence_map_lock);
+	spin_lock_init(&synx_dev->native->global_map_lock);
+	spin_lock_init(&synx_dev->native->local_map_lock);
+	spin_lock_init(&synx_dev->native->csl_map_lock);
+
+	/* zero idx not allowed */
+	set_bit(0, synx_dev->native->bitmap);
+	return 0;
+}
+
+static int synx_cdsp_restart_notifier(struct notifier_block *nb,
+	unsigned long code, void *data)
+{
+	struct synx_cdsp_ssr *cdsp_ssr = &synx_dev->cdsp_ssr;
+
+	if (&cdsp_ssr->nb != nb) {
+		dprintk(SYNX_ERR, "Invalid SSR Notifier block\n");
+		return NOTIFY_BAD;
+	}
+
+	switch (code) {
+	case QCOM_SSR_BEFORE_SHUTDOWN:
+		break;
+	case QCOM_SSR_AFTER_SHUTDOWN:
+		if (cdsp_ssr->ssrcnt != 0) {
+			dprintk(SYNX_INFO, "Cleaning up global memory\n");
+			synx_global_recover(SYNX_CORE_NSP);
+		}
+		break;
+	case QCOM_SSR_BEFORE_POWERUP:
+		break;
+	case QCOM_SSR_AFTER_POWERUP:
+		dprintk(SYNX_DBG, "CDSP is up\n");
+		if (cdsp_ssr->ssrcnt == 0)
+			cdsp_ssr->ssrcnt++;
+		break;
+	default:
+		dprintk(SYNX_ERR, "Unknown status code for CDSP SSR\n");
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int __init synx_init(void)
+{
+	int rc;
+
+	dprintk(SYNX_INFO, "device initialization start\n");
+
+	synx_dev = kzalloc(sizeof(*synx_dev), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_dev))
+		return -SYNX_NOMEM;
+
+	rc = alloc_chrdev_region(&synx_dev->dev, 0, 1, SYNX_DEVICE_NAME);
+	if (rc < 0) {
+		dprintk(SYNX_ERR, "region allocation failed\n");
+		goto alloc_fail;
+	}
+
+	cdev_init(&synx_dev->cdev, &synx_fops);
+	synx_dev->cdev.owner = THIS_MODULE;
+	rc = cdev_add(&synx_dev->cdev, synx_dev->dev, 1);
+	if (rc < 0) {
+		dprintk(SYNX_ERR, "device registration failed\n");
+		goto reg_fail;
+	}
+
+	synx_dev->class = class_create(THIS_MODULE, SYNX_DEVICE_NAME);
+
+	if (IS_ERR(synx_dev->class)) {
+		rc = PTR_ERR(synx_dev->class);
+		goto err_class_create;
+	}
+	device_create(synx_dev->class, NULL, synx_dev->dev,
+		NULL, SYNX_DEVICE_NAME);
+
+	synx_dev->wq_cb = alloc_workqueue(SYNX_WQ_CB_NAME,
+		WQ_HIGHPRI | WQ_UNBOUND, SYNX_WQ_CB_THREADS);
+	synx_dev->wq_cleanup = alloc_workqueue(SYNX_WQ_CLEANUP_NAME,
+		WQ_HIGHPRI | WQ_UNBOUND, SYNX_WQ_CLEANUP_THREADS);
+	if (!synx_dev->wq_cb || !synx_dev->wq_cleanup) {
+		dprintk(SYNX_ERR,
+			"high priority work queue creation failed\n");
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	synx_dev->native = vzalloc(sizeof(*synx_dev->native));
+	if (IS_ERR_OR_NULL(synx_dev->native)) {
+		rc = -SYNX_NOMEM;
+		goto fail;
+	}
+
+	mutex_init(&synx_dev->vtbl_lock);
+	mutex_init(&synx_dev->error_lock);
+	INIT_LIST_HEAD(&synx_dev->error_list);
+	synx_dev->debugfs_root = synx_init_debugfs_dir(synx_dev);
+
+	rc = synx_global_mem_init();
+	if (rc) {
+		dprintk(SYNX_ERR, "shared mem init failed, err=%d\n", rc);
+		goto err;
+	}
+
+	synx_dev->cdsp_ssr.ssrcnt = 0;
+	synx_dev->cdsp_ssr.nb.notifier_call = synx_cdsp_restart_notifier;
+	synx_dev->cdsp_ssr.handle =
+		qcom_register_ssr_notifier("cdsp", &synx_dev->cdsp_ssr.nb);
+	if (IS_ERR_OR_NULL(synx_dev->cdsp_ssr.handle)) {
+		dprintk(SYNX_ERR, "SSR registration failed\n");
+		rc = -SYNX_INVALID;
+		goto err;
+	}
+
+	ipclite_register_client(synx_ipc_callback, NULL);
+	synx_local_mem_init();
+
+	dprintk(SYNX_INFO, "device initialization success\n");
+
+	return 0;
+
+err:
+	vfree(synx_dev->native);
+fail:
+	device_destroy(synx_dev->class, synx_dev->dev);
+	class_destroy(synx_dev->class);
+err_class_create:
+	cdev_del(&synx_dev->cdev);
+reg_fail:
+	unregister_chrdev_region(synx_dev->dev, 1);
+alloc_fail:
+	kfree(synx_dev);
+	synx_dev = NULL;
+	return rc;
+}
+
+static void __exit synx_exit(void)
+{
+	struct error_node *err_node, *err_node_tmp;
+
+	flush_workqueue(synx_dev->wq_cb);
+	flush_workqueue(synx_dev->wq_cleanup);
+	device_destroy(synx_dev->class, synx_dev->dev);
+	class_destroy(synx_dev->class);
+	cdev_del(&synx_dev->cdev);
+	unregister_chrdev_region(synx_dev->dev, 1);
+	synx_remove_debugfs_dir(synx_dev);
+	/* release uncleared error nodes */
+	list_for_each_entry_safe(
+			err_node, err_node_tmp,
+			&synx_dev->error_list,
+			node) {
+		list_del(&err_node->node);
+		kfree(err_node);
+	}
+	mutex_destroy(&synx_dev->vtbl_lock);
+	mutex_destroy(&synx_dev->error_lock);
+	vfree(synx_dev->native);
+	kfree(synx_dev);
+}
+
+module_init(synx_init);
+module_exit(synx_exit);
+
+MODULE_DESCRIPTION("Global Synx Driver");
+MODULE_LICENSE("GPL v2");

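synx_init() above follows the usual character-device bring-up ladder (chrdev region, cdev, class, device, then driver-private state) and unwinds in reverse order through its goto labels on failure. Below is a minimal sketch of the same pattern, with hypothetical demo_* names; it assumes the same kernel baseline as this driver, where class_create() still takes a module argument:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>

static dev_t demo_devt;
static struct cdev demo_cdev;
static struct class *demo_class;
static const struct file_operations demo_fops = { .owner = THIS_MODULE };

static int __init demo_init(void)
{
	int rc = alloc_chrdev_region(&demo_devt, 0, 1, "demo");

	if (rc < 0)
		return rc;

	cdev_init(&demo_cdev, &demo_fops);
	demo_cdev.owner = THIS_MODULE;
	rc = cdev_add(&demo_cdev, demo_devt, 1);
	if (rc < 0)
		goto unregister_region;

	demo_class = class_create(THIS_MODULE, "demo");
	if (IS_ERR(demo_class)) {
		rc = PTR_ERR(demo_class);
		goto del_cdev;
	}
	device_create(demo_class, NULL, demo_devt, NULL, "demo");
	return 0;

del_cdev:
	cdev_del(&demo_cdev);
unregister_region:
	unregister_chrdev_region(demo_devt, 1);
	return rc;
}

static void __exit demo_exit(void)
{
	/* teardown runs in strict reverse order of the bring-up above */
	device_destroy(demo_class, demo_devt);
	class_destroy(demo_class);
	cdev_del(&demo_cdev);
	unregister_chrdev_region(demo_devt, 1);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Illustrative chrdev bring-up skeleton");
MODULE_LICENSE("GPL v2");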
+ 590 - 0
qcom/opensource/synx-kernel/msm/synx/synx_api.h

@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_API_H__
+#define __SYNX_API_H__
+
+#include <linux/list.h>
+#include <synx_header.h>
+
+#include "synx_err.h"
+
+#define SYNX_NO_TIMEOUT        ((u64)-1)
+
+/**
+ * SYNX_INVALID_HANDLE      : Clients can assign this value to a synx handle
+ *                            variable when it does not hold a valid synx handle
+ */
+#define SYNX_INVALID_HANDLE 0
+
+/* synx object states */
+#define SYNX_STATE_INVALID             0    // Invalid synx object
+#define SYNX_STATE_ACTIVE              1    // Synx object has not been signaled
+#define SYNX_STATE_SIGNALED_ERROR      3    // Synx object signaled with error
+#define SYNX_STATE_SIGNALED_EXTERNAL   5    // Synx object was signaled by external dma client.
+#define SYNX_STATE_SIGNALED_SSR        6    // Synx object signaled with SSR
+#define SYNX_STATE_TIMEOUT             7    // Callback status for synx object in case of timeout
+
+/**
+ * enum synx_create_flags - Flags passed during synx_create call.
+ *
+ * SYNX_CREATE_LOCAL_FENCE  : Instructs the framework to create a local synx object
+ *                            for local synchronization, i.e., within the same core.
+ * SYNX_CREATE_GLOBAL_FENCE : Instructs the framework to create a global synx object
+ *                            for global synchronization, i.e., across supported cores.
+ * SYNX_CREATE_DMA_FENCE    : Create a synx object by wrapping the provided dma fence.
+ *                            Need to pass the dma_fence ptr through fence variable
+ *                            if this flag is set. (NOT SUPPORTED)
+ * SYNX_CREATE_CSL_FENCE    : Create a synx object with provided csl fence.
+ *                            Establishes interop with the csl fence through
+ *                            bind operations. (NOT SUPPORTED)
+ */
+enum synx_create_flags {
+	SYNX_CREATE_LOCAL_FENCE  = 0x01,
+	SYNX_CREATE_GLOBAL_FENCE = 0x02,
+	SYNX_CREATE_DMA_FENCE    = 0x04,
+	SYNX_CREATE_CSL_FENCE    = 0x08,
+	SYNX_CREATE_MAX_FLAGS    = 0x10,
+};
+
+/**
+ * enum synx_init_flags - Session initialization flag
+ * SYNX_INIT_DEFAULT   : Initialization flag to be passed
+ *                       when initializing session
+ * SYNX_INIT_MAX       : Used for internal checks
+ */
+enum synx_init_flags {
+	SYNX_INIT_DEFAULT = 0x00,
+	SYNX_INIT_MAX     = 0x01,
+};
+
+/**
+ * enum synx_import_flags - Import flags
+ *
+ * SYNX_IMPORT_LOCAL_FENCE  : Instructs the framework to create a local synx object
+ *                            for local synchronization, i.e., within the same core.
+ * SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create a global synx object
+ *                            for global synchronization, i.e., across supported cores.
+ * SYNX_IMPORT_SYNX_FENCE   : Import native Synx handle for synchronization.
+ *                            Need to pass the Synx handle ptr through fence variable
+ *                            if this flag is set. Client must pass:
+ *                            a. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_LOCAL_FENCE
+ *                               to import a synx handle as local synx handle.
+ *                            b. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_GLOBAL_FENCE
+ *                               to import a synx handle as global synx handle.
+ * SYNX_IMPORT_DMA_FENCE    : Import dma fence and create Synx handle for interop.
+ *                            Need to pass the dma_fence ptr through fence variable
+ *                            if this flag is set. Client must pass:
+ *                            a. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_LOCAL_FENCE
+ *                               to import a dma fence and create local synx handle
+ *                               for interop.
+ *                            b. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_GLOBAL_FENCE
+ *                               to import a dma fence and create global synx handle
+ *                               for interop.
+ * SYNX_IMPORT_EX_RELEASE   : Flag to inform relaxed invocation where release call
+ *                            need not be called by client on this handle after import.
+ *                            (NOT SUPPORTED)
+ */
+enum synx_import_flags {
+	SYNX_IMPORT_LOCAL_FENCE  = 0x01,
+	SYNX_IMPORT_GLOBAL_FENCE = 0x02,
+	SYNX_IMPORT_SYNX_FENCE   = 0x04,
+	SYNX_IMPORT_DMA_FENCE    = 0x08,
+	SYNX_IMPORT_EX_RELEASE   = 0x10,
+};
+
+/**
+ * enum synx_signal_status - Signal status
+ *
+ * SYNX_STATE_SIGNALED_SUCCESS : Signal success
+ * SYNX_STATE_SIGNALED_CANCEL  : Signal cancellation
+ * SYNX_STATE_SIGNALED_MAX     : Clients can send custom notification
+ *                               beyond the max value (only positive)
+ */
+enum synx_signal_status {
+	SYNX_STATE_SIGNALED_SUCCESS = 2,
+	SYNX_STATE_SIGNALED_CANCEL  = 4,
+	SYNX_STATE_SIGNALED_MAX     = 64,
+};
+
+/**
+ * synx_callback - Callback invoked by external fence
+ *
+ * External fence dispatch the registered callback to notify
+ * signal to synx framework.
+ */
+typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
+
+/**
+ * synx_user_callback_t - Callback function registered by clients
+ *
+ * User callback registered for non-blocking wait. Dispatched when the
+ * synx object is signaled or times out, with the status of the synx object.
+ */
+typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data);
+
+/**
+ * struct bind_operations - Function pointers that need to be defined
+ *    to achieve bind functionality for external fence with synx obj
+ *
+ * @register_callback   : Function to register with external sync object
+ * @deregister_callback : Function to deregister with external sync object
+ * @enable_signaling    : Function to enable the signaling on the external
+ *                        sync object (optional)
+ * @signal              : Function to signal the external sync object
+ */
+struct bind_operations {
+	int (*register_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*deregister_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*enable_signaling)(s32 sync_obj);
+	int (*signal)(s32 sync_obj, u32 status);
+};
+
+/**
+ * synx_bind_client_type : External fence types supported for bind (NOT SUPPORTED)
+ *
+ * SYNX_TYPE_CSL : Camera CSL fence
+ * SYNX_MAX_BIND_TYPES : Used for internal checks
+ */
+enum synx_bind_client_type {
+	SYNX_TYPE_CSL = 0,
+	SYNX_MAX_BIND_TYPES,
+};
+
+/**
+ * struct synx_register_params - External registration parameters  (NOT SUPPORTED)
+ *
+ * @ops  : Bind operations struct
+ * @name : External client name
+ *         Only the first 64 bytes are accepted; the rest are ignored
+ * @type : Synx bind client type
+ */
+struct synx_register_params {
+	struct bind_operations ops;
+	char *name;
+	enum synx_bind_client_type type;
+};
+
+/**
+ * struct synx_queue_desc - Memory descriptor of the queue allocated by
+ *                          the fence driver for each client during
+ *                          register. (Clients need not pass any pointer
+ *                          in synx_initialization_params. It is for
+ *                          future use.)
+ *
+ * @vaddr    : CPU virtual address of the queue.
+ * @dev_addr : Physical address of the memory object.
+ * @size     : Size of the memory.
+ * @mem_data : Internal pointer with the attributes of the allocation.
+ */
+struct synx_queue_desc {
+	void *vaddr;
+	u64 dev_addr;
+	u64 size;
+	void *mem_data;
+};
+
+/**
+ * enum synx_client_id : Unique identifier of the supported clients
+ *
+ * @SYNX_CLIENT_NATIVE   : Native Client
+ * @SYNX_CLIENT_GFX_CTX0 : GFX Client 0
+ * @SYNX_CLIENT_DPU_CTL0 : DPU Client 0
+ * @SYNX_CLIENT_DPU_CTL1 : DPU Client 1
+ * @SYNX_CLIENT_DPU_CTL2 : DPU Client 2
+ * @SYNX_CLIENT_DPU_CTL3 : DPU Client 3
+ * @SYNX_CLIENT_DPU_CTL4 : DPU Client 4
+ * @SYNX_CLIENT_DPU_CTL5 : DPU Client 5
+ * @SYNX_CLIENT_EVA_CTX0 : EVA Client 0
+ * @SYNX_CLIENT_VID_CTX0 : Video Client 0
+ * @SYNX_CLIENT_NSP_CTX0 : NSP Client 0
+ * @SYNX_CLIENT_IFE_CTX0 : IFE Client 0
+ * @SYNX_CLIENT_ICP_CTX0 : ICP Client 0
+ */
+enum synx_client_id {
+	SYNX_CLIENT_NATIVE = 0,
+	SYNX_CLIENT_GFX_CTX0,
+	SYNX_CLIENT_DPU_CTL0,
+	SYNX_CLIENT_DPU_CTL1,
+	SYNX_CLIENT_DPU_CTL2,
+	SYNX_CLIENT_DPU_CTL3,
+	SYNX_CLIENT_DPU_CTL4,
+	SYNX_CLIENT_DPU_CTL5,
+	SYNX_CLIENT_EVA_CTX0,
+	SYNX_CLIENT_VID_CTX0,
+	SYNX_CLIENT_NSP_CTX0,
+	SYNX_CLIENT_IFE_CTX0,
+	SYNX_CLIENT_ICP_CTX0,
+	SYNX_CLIENT_MAX,
+};
+
+/**
+ * struct synx_session - Client session identifier
+ *
+ * @type   : Session type.
+ *           Internal Member. (Do not access/modify)
+ * @client : Pointer to client session
+ *           Internal Member. (Do not access/modify)
+ */
+struct synx_session {
+	u32 type;
+	void *client;
+};
+
+/**
+ * struct synx_initialization_params - Session params
+ *
+ * @name  : Client session name
+ *          Only the first 64 bytes are accepted; the rest are ignored
+ * @ptr   : Memory descriptor of the queue allocated by the fence driver
+ *          during device registration (filled by function)
+ * @id    : Client identifier
+ * @flags : Synx initialization flags
+ */
+struct synx_initialization_params {
+	const char *name;
+	struct synx_queue_desc *ptr;
+	enum synx_client_id id;
+	enum synx_init_flags flags;
+};
+
+/**
+ * struct synx_create_params - Synx creation parameters
+ *
+ * @name     : Optional parameter associating a name with the synx
+ *             object for debug purposes
+ *             Only the first 64 bytes are accepted;
+ *             the rest are ignored
+ * @h_synx   : Pointer to synx object handle (filled by function)
+ * @fence    : Pointer to external dma fence or csl fence. (NOT SUPPORTED)
+ * @flags    : Synx flags for customization
+ */
+struct synx_create_params {
+	const char *name;
+	u32 *h_synx;
+	void *fence;
+	enum synx_create_flags flags;
+};
+
+/**
+ * enum synx_merge_flags - Handle merge flags
+ *
+ * SYNX_MERGE_LOCAL_FENCE   : Create local composite synx object. To be passed along
+ *                            with SYNX_MERGE_NOTIFY_ON_ALL.
+ * SYNX_MERGE_GLOBAL_FENCE  : Create global composite synx object. To be passed along
+ *                            with SYNX_MERGE_NOTIFY_ON_ALL.
+ * SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects.
+ *                            Clients must pass:
+ *                            a. SYNX_MERGE_LOCAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL
+ *                               to create local composite synx object and notify
+ *                               it when all child synx objects are signaled.
+ *                            b. SYNX_MERGE_GLOBAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL
+ *                               to create global composite synx object and notify
+ *                               it when all child synx objects are signaled.
+ * SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object. (NOT SUPPORTED)
+ */
+enum synx_merge_flags {
+	SYNX_MERGE_LOCAL_FENCE   = 0x01,
+	SYNX_MERGE_GLOBAL_FENCE  = 0x02,
+	SYNX_MERGE_NOTIFY_ON_ALL = 0x04,
+	SYNX_MERGE_NOTIFY_ON_ANY = 0x08,
+};
+
+/**
+ * struct synx_merge_params - Synx merge parameters
+ *
+ * @h_synxs      : Pointer to an array of synx handles to be merged
+ * @flags        : Merge flags
+ * @num_objs     : Number of synx handles to be merged (in array h_synxs).
+ * @h_merged_obj : Merged synx handle (filled by function)
+ */
+struct synx_merge_params {
+	u32 *h_synxs;
+	enum synx_merge_flags flags;
+	u32 num_objs;
+	u32 *h_merged_obj;
+};
+
+/**
+ * enum synx_import_type - Import type
+ *
+ * SYNX_IMPORT_INDV_PARAMS : Import filled with synx_import_indv_params struct
+ * SYNX_IMPORT_ARR_PARAMS  : Import filled with synx_import_arr_params struct
+ */
+enum synx_import_type {
+	SYNX_IMPORT_INDV_PARAMS = 0x01,
+	SYNX_IMPORT_ARR_PARAMS  = 0x02,
+};
+
+/**
+ * struct synx_import_indv_params - Synx import indv parameters
+ *
+ * @new_h_synx  : Pointer to the new synx object handle
+ *                (filled by the function)
+ *                The new handle(s) should be used by the importing
+ *                process for all synx API operations and
+ *                for sharing with FW cores.
+ * @flags       : Synx import flags
+ * @fence       : Pointer to DMA fence fd or synx handle.
+ */
+struct synx_import_indv_params {
+	u32 *new_h_synx;
+	enum synx_import_flags flags;
+	void *fence;
+};
+
+/**
+ * struct synx_import_arr_params - Synx import arr parameters
+ *
+ * @list        : List of synx_import_indv_params
+ * @num_fences  : Number of fences or synx handles to be imported
+ */
+struct synx_import_arr_params {
+	struct synx_import_indv_params *list;
+	u32 num_fences;
+};
+
+/**
+ * struct synx_import_params - Synx import parameters
+ *
+ * @type : Import params type filled by client
+ * @indv : Params to import an individual handle or fence
+ * @arr  : Params to import an array of handles or fences
+ */
+struct synx_import_params {
+	enum synx_import_type type;
+	union {
+		struct synx_import_indv_params indv;
+		struct synx_import_arr_params  arr;
+	};
+};
+
+/**
+ * struct synx_callback_params - Synx callback parameters
+ *
+ * @h_synx         : Synx object handle
+ * @cb_func        : Pointer to callback func to be invoked.
+ * @userdata       : Opaque pointer passed back with callback as data
+ * @cancel_cb_func : Pointer to callback to ack cancellation
+ * @timeout_ms     : Timeout in ms. SYNX_NO_TIMEOUT if no timeout.
+ */
+struct synx_callback_params {
+	u32 h_synx;
+	synx_user_callback_t cb_func;
+	void *userdata;
+	synx_user_callback_t cancel_cb_func;
+	u64 timeout_ms;
+};
+
+/* Kernel APIs */
+
+/**
+ * synx_register_ops - Register operations for external synchronization (NOT SUPPORTED)
+ *
+ * Register with synx for enabling external synchronization through bind
+ *
+ * @param params : Pointer to register params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_INVALID will be returned if params are invalid.
+ * -SYNX_NOMEM will be returned if bind ops cannot be registered due to
+ * insufficient memory.
+ * -SYNX_ALREADY will be returned if type already in use.
+ */
+int synx_register_ops(const struct synx_register_params *params);
+
+/**
+ * synx_deregister_ops - De-register external synchronization operations  (NOT SUPPORTED)
+ *
+ * @param params : Pointer to register params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_INVALID will be returned if record not found.
+ */
+int synx_deregister_ops(const struct synx_register_params *params);
+
+/**
+ * synx_initialize - Initializes a new client session
+ *
+ * @param params : Pointer to session init params
+ *
+ * @return Client session pointer on success. NULL or error in case of failure.
+ */
+struct synx_session *synx_initialize(struct synx_initialization_params *params);
+
+/**
+ * synx_uninitialize - Destroys the client session
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ *
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
+ */
+int synx_uninitialize(struct synx_session *session);
+
+/**
+ * synx_create - Creates a synx object
+ *
+ * Creates a new synx obj and returns the handle to the client. There can be
+ * a maximum of 4095 global synx handles or local synx handles across
+ * sessions.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Pointer to create params
+ *
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
+ */
+int synx_create(struct synx_session *session, struct synx_create_params *params);
+
+/**
+ * synx_async_wait - Registers a callback with a synx object
+ *
+ * Clients can register a maximum of 64 callback functions per
+ * synx session. Registered callback functions should perform minimal computation.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Callback params.
+ *                  cancel_cb_func in callback params is optional with this API.
+ *
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
+ */
+int synx_async_wait(struct synx_session *session, struct synx_callback_params *params);
+
+/**
+ * synx_cancel_async_wait - De-registers a callback with a synx object
+ *
+ * This API will cancel one instance of the callback function (mapped
+ * with userdata and h_synx) provided in cb_func of the callback params.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Callback params
+ *
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
+ */
+int synx_cancel_async_wait(struct synx_session *session,
+	struct synx_callback_params *params);
+
+/**
+ * synx_signal - Signals a synx object with the status argument.
+ *
+ * This function will signal the synx object referenced by h_synx
+ * and invoke any external bindings on the synx object.
+ * The status parameter will indicate whether the entity
+ * performing the signaling wants to convey an error case or a success case.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle
+ * @param status  : Status of signaling.
+ *                  Clients can send custom signaling status
+ *                  beyond SYNX_STATE_SIGNALED_MAX.
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_signal(struct synx_session *session, u32 h_synx,
+	enum synx_signal_status status);
+
+/**
+ * synx_merge - Merges multiple synx objects
+ *
+ * This function will merge multiple synx objects into a synx group.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Merge params
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_merge(struct synx_session *session, struct synx_merge_params *params);
+
+/**
+ * synx_wait - Waits for a synx object synchronously
+ *
+ * Does a wait on the synx object identified by h_synx for a maximum
+ * of timeout_ms milliseconds. Must not be called from interrupt context as
+ * this API can sleep.
+ *
+ * @param session    : Session ptr (returned from synx_initialize)
+ * @param h_synx     : Synx object handle to be waited upon
+ * @param timeout_ms : Timeout in ms
+ *
+ * @return Status of synx object if handle is signaled. -SYNX_INVALID if synx object
+ * is in bad state or arguments are invalid, -SYNX_TIMEOUT if wait times out.
+ */
+int synx_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms);
+
+/**
+ * synx_get_status - Returns the status of the synx object.
+ *
+ * This API should not be used in polling mode to know if the handle
+ * is signaled or not.
+ * Clients need to explicitly wait using synx_wait() or synx_async_wait().
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle
+ *
+ * @return Status of the synx object
+ */
+int synx_get_status(struct synx_session *session, u32 h_synx);
+
+/**
+ * synx_import - Imports (looks up) synx object from given handle or fence
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Pointer to import params
+ *
+ * @return Status of operation. Negative in case of failure, SYNX_SUCCESS otherwise.
+ */
+int synx_import(struct synx_session *session, struct synx_import_params *params);
+
+/**
+ * synx_get_fence - Get the native fence backing the synx object
+ *
+ * The synx framework takes an additional reference on the dma fence and returns
+ * the native fence. Clients must release this additional reference explicitly by
+ * calling kref_put.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle
+ *
+ * @return Fence pointer in case of success and NULL in case of failure.
+ */
+void *synx_get_fence(struct synx_session *session, u32 h_synx);
+
+/**
+ * synx_release - Release the synx object.
+ *
+ * Every created, imported or merged synx object should be released.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_release(struct synx_session *session, u32 h_synx);
+
+/**
+ * synx_recover - Recover any possible handle leaks
+ *
+ * This function should be called on HW hang/reset to
+ * recover the synx handles shared with the hung core. It cleans up
+ * synx handles owned by the subsystem under hang/reset and avoids
+ * potential resource leaks.
+ *
+ * The function does not destroy the session; it only
+ * recovers synx handles belonging to the session.
+ * The synx session remains active, and clients
+ * need to destroy the session explicitly through
+ * the synx_uninitialize API.
+ *
+ * All the unsignaled handles owned/imported by the core at the time of reset
+ * will be signaled by synx framework on behalf of hung core with SYNX_STATE_SIGNALED_SSR.
+ *
+ * @param id : Client ID of core to recover
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_recover(enum synx_client_id id);
+
+#endif /* __SYNX_API_H__ */

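Taken together, the kerneldoc above implies a client flow along the following lines. This is a sketch assembled only from the declarations in this header, not driver code; the callback body, the demo_* names, and the chosen flags are illustrative assumptions:

#include "synx_api.h"

/* placeholder callback; dispatched on signal or timeout */
static void demo_cb(u32 h_synx, int status, void *data)
{
}

static int demo_synx_flow(void)
{
	struct synx_initialization_params iparams = {
		.name  = "demo-client",
		.id    = SYNX_CLIENT_NATIVE,
		.flags = SYNX_INIT_DEFAULT,
	};
	struct synx_create_params cparams = {};
	struct synx_callback_params cb = {};
	struct synx_session *session;
	u32 h_synx = SYNX_INVALID_HANDLE;
	int rc;

	session = synx_initialize(&iparams);
	if (IS_ERR_OR_NULL(session))
		return -SYNX_INVALID;

	cparams.name   = "demo-fence";
	cparams.h_synx = &h_synx;
	cparams.flags  = SYNX_CREATE_GLOBAL_FENCE;
	rc = synx_create(session, &cparams);
	if (rc != SYNX_SUCCESS)
		goto uninit;

	cb.h_synx     = h_synx;
	cb.cb_func    = demo_cb;
	cb.timeout_ms = SYNX_NO_TIMEOUT;
	rc = synx_async_wait(session, &cb);
	if (rc != SYNX_SUCCESS)
		goto release;

	/* the producer side would eventually do: */
	rc = synx_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);

release:
	synx_release(session, h_synx);
uninit:
	synx_uninitialize(session);
	return rc;
}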
+ 203 - 0
qcom/opensource/synx-kernel/msm/synx/synx_debugfs.c

@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+#include "synx_api.h"
+#include "synx_debugfs.h"
+#include "synx_util.h"
+#include "synx_global.h"
+#include "synx_debugfs_util.h"
+
+#define MAX_DBG_BUF_SIZE (64 * SYNX_MAX_OBJS)
+#ifdef ENABLE_DEBUGFS
+#define MAX_HELP_BUF_SIZE (4096)
+#define BUF_SIZE 16
+#endif
+
+struct dentry *my_direc;
+u32 lower_handle_id = GLOBAL_HANDLE_STARTING_ID, upper_handle_id = GLOBAL_HANDLE_STARTING_ID;
+long synx_columns = STATUS_COLUMN | ID_COLUMN | REF_CNT_COLUMN |
+	NUM_CHILD_COLUMN | SUBSCRIBERS_COLUMN | WAITERS_COLUMN | PARENTS_COLUMN | GLOBAL_SHARED_MEM;
+EXPORT_SYMBOL(synx_columns);
+
+int synx_debug = SYNX_ERR | SYNX_WARN |
+	SYNX_INFO;
+EXPORT_SYMBOL(synx_debug);
+
+void populate_bound_rows(
+	struct synx_coredata *row, char *cur, char *end)
+{
+	int j;
+
+	for (j = 0; j < row->num_bound_synxs; j++)
+		SYNX_CONSOLE_LOG(cur, end, "\n\tID: %d",
+		row->bound_synxs[j].external_desc.id);
+}
+
+static ssize_t synx_table_read(struct file *file,
+		char *buf,
+		size_t count,
+		loff_t *ppos)
+{
+	struct synx_device *dev = file->private_data;
+	struct error_node *err_node, *err_node_tmp;
+	char *dbuf, *cur, *end;
+	ssize_t len = 0;
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf)
+		return -ENOMEM;
+
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+#ifdef ENABLE_DEBUGFS
+	SYNX_CONSOLE_LOG(cur, end, "\n\tHandle ID start value : %d", lower_handle_id);
+	SYNX_CONSOLE_LOG(cur, end, "\n\tHandle ID end value : %d\n", upper_handle_id);
+
+	if (synx_columns & GLOBAL_HASHTABLE)
+		synx_debugfs_util_print_hash_table(&cur, &end, true);
+	if (synx_columns & LOCAL_HASHTABLE)
+		synx_debugfs_util_print_hash_table(&cur, &end, false);
+	if (synx_columns & CLIENT_HASHTABLE)
+		synx_debugfs_util_print_client_table(&cur, &end);
+	if (synx_columns & GLOBAL_SHARED_MEM)
+		synx_debugfs_util_print_global_shared_memory(&cur, &end);
+	if (synx_columns & DMA_FENCE_MAP)
+		synx_debugfs_util_print_dma_fence(&cur, &end);
+#endif
+
+	if (synx_columns & ERROR_CODES && !list_empty(&dev->error_list)) {
+		SYNX_CONSOLE_LOG(cur, end, "\nError(s): ");
+		mutex_lock(&dev->error_lock);
+		list_for_each_entry_safe(
+			 err_node, err_node_tmp,
+			 &dev->error_list, node) {
+			SYNX_CONSOLE_LOG(cur, end, "\n\tTime: %s - ", err_node->timestamp);
+			SYNX_CONSOLE_LOG(cur, end, "ID: %d - ", err_node->h_synx);
+			SYNX_CONSOLE_LOG(cur, end, "Code: %d", err_node->error_code);
+			list_del(&err_node->node);
+			kfree(err_node);
+		}
+		mutex_unlock(&dev->error_lock);
+	}
+	len = simple_read_from_buffer(buf, count, ppos,
+		dbuf, cur - dbuf);
+	kfree(dbuf);
+	return len;
+}
+
+#ifdef ENABLE_DEBUGFS
+static ssize_t synx_table_write(struct file *file,
+		const char __user *buf,
+		size_t count,
+		loff_t *ppos)
+{
+	u32 stat = -1;
+	u32 i = 0, base = 10, num = 0;
+	bool invalid_val = false;
+	char *kbuffer = kzalloc(BUF_SIZE, GFP_KERNEL);
+
+	if (!kbuffer)
+		return -ENOMEM;
+	stat = copy_from_user(kbuffer, buf, min_t(size_t, count, BUF_SIZE - 1));
+	if (stat != 0) {
+		kfree(kbuffer);
+		return -EFAULT;
+	}
+	if (kbuffer[i] == '0' && (kbuffer[i+1] == 'x' || kbuffer[i+1] == 'X')) {
+		base = 16;
+		i += 2;
+	}
+	for ( ; (i < BUF_SIZE / 2 && kbuffer[i] != '-' && kbuffer[i] != '\n'); i++)
+		SYNX_READ_CHAR(kbuffer, num, base, i);
+	if (!invalid_val)
+		lower_handle_id = num;
+
+	if (kbuffer[i] == '-') {
+		num = 0;
+		i++;
+		for ( ; i < BUF_SIZE && kbuffer[i] != '\n'; i++)
+			SYNX_READ_CHAR(kbuffer, num, base, i);
+		if (!invalid_val)
+			upper_handle_id = num;
+	} else if (kbuffer[i] == '\n') {
+		upper_handle_id = lower_handle_id;
+	}
+	kfree(kbuffer);
+
+	return count;
+}
+#endif
+
+static const struct file_operations synx_table_fops = {
+	.owner = THIS_MODULE,
+	.read = synx_table_read,
+#ifdef ENABLE_DEBUGFS
+	.write = synx_table_write,
+#endif
+	.open = simple_open,
+};
+
+#ifdef ENABLE_DEBUGFS
+static ssize_t synx_help_read(struct file *file,
+		char *buf,
+		size_t count,
+		loff_t *ppos)
+{
+	char *dbuf, *cur, *end;
+	ssize_t len = 0;
+
+	dbuf = kzalloc(MAX_HELP_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf)
+		return -ENOMEM;
+
+	cur = dbuf;
+	end = cur + MAX_HELP_BUF_SIZE;
+	synx_debugfs_util_load_help_content(&cur, &end);
+	len = simple_read_from_buffer(buf, count, ppos, dbuf, cur - dbuf);
+	kfree(dbuf);
+	return len;
+}
+
+static const struct file_operations synx_help_fops = {
+	.owner = THIS_MODULE,
+	.read = synx_help_read,
+};
+#endif
+
+struct dentry *synx_init_debugfs_dir(struct synx_device *dev)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("synx_debug", NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		dprintk(SYNX_ERR, "Failed to create debugfs for synx\n");
+		return NULL;
+	}
+	debugfs_create_u32("debug_level", 0644, dir, &synx_debug);
+	debugfs_create_ulong("column_level", 0644, dir, &synx_columns);
+
+	if (!debugfs_create_file("synx_table",
+		0644, dir, dev, &synx_table_fops)) {
+		dprintk(SYNX_ERR, "Failed to create debugfs file for synx\n");
+		return NULL;
+	}
+#ifdef ENABLE_DEBUGFS
+	if (!debugfs_create_file("help",
+		0444, dir, dev, &synx_help_fops)) {
+		dprintk(SYNX_ERR, "Failed to create debugfs help file for synx\n");
+		return NULL;
+	}
+#endif
+	return dir;
+}
+
+void synx_remove_debugfs_dir(struct synx_device *dev)
+{
+	debugfs_remove_recursive(dev->debugfs_root);
+}

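A few concrete examples of the synx_table write syntax implemented above (the write handler, like the table printers, is compiled in only when ENABLE_DEBUGFS is defined): writing "1048577-1048580" sets lower_handle_id to 1048577 and upper_handle_id to 1048580; a single value such as "1048577", terminated by the newline that echo appends, sets both bounds to the same handle; a leading "0x" or "0X" switches parsing to base 16 for both bounds, as in "0x100001-10000f". A subsequent read (cat synx_table) prints the tables selected by column_level for that handle range and, when the ERROR_CODES bit is set, also drains the error list, so each recorded error node is reported once and then freed.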
+ 144 - 0
qcom/opensource/synx-kernel/msm/synx/synx_debugfs.h

@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_DEBUGFS_H__
+#define __SYNX_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#include "synx_private.h"
+//#define ENABLE_DEBUGFS
+#define STATE_NAME_SPACE (4)
+
+enum synx_debug_level {
+	SYNX_ERR  = 0x0001,
+	SYNX_WARN = 0x0002,
+	SYNX_INFO = 0x0004,
+	SYNX_DBG  = 0x0008,
+	SYNX_VERB = 0x0010,
+	SYNX_IPCL = 0x0020,
+	SYNX_GSM  = 0x0040,
+	SYNX_MEM  = 0x0080,
+	SYNX_ALL  = SYNX_ERR | SYNX_WARN | SYNX_INFO |
+				SYNX_DBG | SYNX_IPCL | SYNX_GSM  | SYNX_MEM,
+};
+
+enum synx_columns_level {
+	NAME_COLUMN         = 0x00000001,
+	ID_COLUMN           = 0x00000002,
+	BOUND_COLUMN        = 0x00000004,
+	STATUS_COLUMN       = 0x00000008,
+	FENCE_COLUMN        = 0x00000010,
+	COREDATA_COLUMN     = 0x00000020,
+	GLOBAL_IDX_COLUMN   = 0x00000040,
+	REL_CNT_COLUMN      = 0x00000080,
+	MAP_CNT_COLUMN      = 0x00000100,
+	REF_CNT_COLUMN      = 0x00000200,
+	NUM_CHILD_COLUMN    = 0x00000400,
+	SUBSCRIBERS_COLUMN  = 0x00000800,
+	WAITERS_COLUMN      = 0x00001000,
+	PARENTS_COLUMN      = 0x00002000,
+	CLIENT_ID_COLUMN    = 0x00004000,
+
+	LOCAL_HASHTABLE     = 0x00010000,
+	GLOBAL_HASHTABLE    = 0x00020000,
+	CLIENT_HASHTABLE    = 0x00040000,
+	GLOBAL_SHARED_MEM   = 0x00080000,
+	DMA_FENCE_MAP       = 0x00100000,
+	CSL_FENCE_MAP       = 0x00200000,
+
+	ERROR_CODES         = 0x00008000,
+};
+
+#ifndef SYNX_DBG_LABEL
+#define SYNX_DBG_LABEL "synx"
+#endif
+
+#define SYNX_DBG_TAG SYNX_DBG_LABEL ": %4s: "
+
+extern int synx_debug;
+extern u32 lower_handle_id, upper_handle_id;
+extern long synx_columns;
+
+static inline char *synx_debug_str(int level)
+{
+	switch (level) {
+	case SYNX_ERR:
+		return "err";
+	case SYNX_WARN:
+		return "warn";
+	case SYNX_INFO:
+		return "info";
+	case SYNX_DBG:
+		return "dbg";
+	case SYNX_VERB:
+		return "verb";
+	case SYNX_IPCL:
+		return "ipcl";
+	case SYNX_GSM:
+		return "gmem";
+	case SYNX_MEM:
+		return "mem";
+	default:
+		return "???";
+	}
+}
+
+#define dprintk(__level, __fmt, arg...)                 \
+	do {                                                \
+		if (synx_debug & __level) {                     \
+			pr_info(SYNX_DBG_TAG "%s: %d: "  __fmt,     \
+				synx_debug_str(__level), __func__,      \
+				__LINE__, ## arg);                      \
+		}                                               \
+	} while (0)
+
+#define SYNX_CONSOLE_LOG(__cur, __end,                  \
+		__fmt_string, arg...)                           \
+	do {                                                \
+		if ((__end - __cur) * (sizeof(char *))          \
+			- strlen(__fmt_string) <= STATE_NAME_SPACE) \
+			dprintk(SYNX_DBG, __fmt_string, ## arg);    \
+		else                                            \
+			__cur += scnprintf(__cur, __end - __cur,    \
+			__fmt_string, ## arg);                      \
+	} while (0)
+
+#define SYNX_READ_CHAR(__buf, __num,                    \
+		__base, __pos)                                  \
+	do {                                                \
+		if (__buf[__pos]  >= '0' &&                     \
+		__buf[__pos] <= '9')                            \
+			__num = __num * __base +                    \
+			(__buf[__pos] - '0');                       \
+		else if (__buf[__pos] >= 'a' &&                 \
+		__buf[__pos] <= 'f')                            \
+			__num = __num * __base +                    \
+			(__buf[__pos] - 'a' + 10);                  \
+		else if (__buf[__pos] >= 'A' &&                 \
+		__buf[__pos] <= 'F')                            \
+			__num = __num * __base +                    \
+			(__buf[__pos] - 'A' + 10);                  \
+		else                                            \
+			invalid_val = true;                         \
+	} while (0)
+
+/**
+ * synx_init_debugfs_dir - Initializes debugfs
+ *
+ * @param dev : Pointer to synx device structure
+ */
+struct dentry *synx_init_debugfs_dir(struct synx_device *dev);
+
+/**
+ * synx_remove_debugfs_dir - Removes debugfs
+ *
+ * @param dev : Pointer to synx device structure
+ */
+void synx_remove_debugfs_dir(struct synx_device *dev);
+
+#endif /* __SYNX_DEBUGFS_H__ */

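The debug_level and column_level files created by synx_init_debugfs_dir() (see synx_debugfs.c above) take these bit definitions directly. Assuming debugfs is mounted at the usual /sys/kernel/debug, enabling every level up to SYNX_VERB is:

	echo 0x001f > /sys/kernel/debug/synx_debug/debug_level

after which a dprintk(SYNX_DBG, ...) line is emitted through pr_info() as "synx:  dbg: <function>: <line>: <message>", per the SYNX_DBG_TAG format above, with the level name padded to four characters.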
+ 497 - 0
qcom/opensource/synx-kernel/msm/synx/synx_debugfs_util.c

@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#include "synx_debugfs.h"
+#include "synx_debugfs_util.h"
+#include "synx_util.h"
+#include "synx_private.h"
+#include "synx_global.h"
+
+#define MAX_CUSTOM_STATUS ((1UL << 32) - 1)
+
+char *synx_debugfs_util_get_state_name(u32 status)
+{
+	char *state;
+
+	if (status == 0)
+		state = "INV";
+	else if (status == 1)
+		state = "ACT";
+	else if (status == 2)
+		state = "SUC";
+	else if (status == 3)
+		state = "ERR";
+	else if (status == 4)
+		state = "CAN";
+	else if (status == 5)
+		state = "EXT";
+	else if (status == 6)
+		state = "SSR";
+	else if (status > 64 && status <= MAX_CUSTOM_STATUS)
+		state = "CUS";
+	else
+		state = "???";
+
+	return state;
+}
+
+static int synx_debugfs_util_get_client_data(struct synx_client *client)
+{
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_NOENT;
+	kref_get(&client->refcount);
+	return SYNX_SUCCESS;
+}
+
+static void synx_debugfs_util_put_client_data(struct synx_client *client)
+{
+	if (!IS_ERR_OR_NULL(client))
+		kref_put(&client->refcount, synx_client_destroy);
+}
+
+static int synx_debugfs_util_get_handle(struct synx_handle_coredata *handle_coredata)
+{
+	if (IS_ERR_OR_NULL(handle_coredata))
+		return -SYNX_NOENT;
+	kref_get(&handle_coredata->refcount);
+	return SYNX_SUCCESS;
+}
+
+static void synx_debugfs_util_put_handle(struct synx_handle_coredata *handle_coredata)
+{
+	if (!IS_ERR_OR_NULL(handle_coredata))
+		kref_put(&handle_coredata->refcount, synx_util_destroy_handle);
+}
+
+static int synx_debugfs_util_get_CSL_fence_entry(struct synx_entry_64 *entry)
+{
+	if (IS_ERR_OR_NULL(entry))
+		return -SYNX_NOENT;
+	kref_get(&entry->refcount);
+	return SYNX_SUCCESS;
+}
+
+static void synx_debugfs_util_put_CSL_fence_entry(struct synx_entry_64 *entry)
+{
+	if (!IS_ERR_OR_NULL(entry))
+		kref_put(&entry->refcount, synx_util_destroy_data);
+}
+
+bool synx_debugfs_util_is_valid_global_shared_memory_entry(struct synx_global_coredata *entry,
+	u32 idx)
+{
+	int i;
+
+	if (!entry || entry->handle != idx)
+		return false;
+	if (entry->status || entry->handle || entry->refcount ||
+	    entry->num_child || entry->subscribers || entry->waiters)
+		return true;
+
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+		if (entry->parents[i])
+			return true;
+	}
+	return false;
+}
+
+static bool synx_debugfs_util_is_valid_dma_handle_range(struct synx_fence_entry *fence_entry)
+{
+	if ((fence_entry->g_handle >= lower_handle_id &&
+		fence_entry->g_handle <= upper_handle_id) ||
+		(fence_entry->l_handle >= lower_handle_id &&
+		fence_entry->l_handle <= upper_handle_id))
+		return true;
+	return false;
+}
+
+static void synx_debugfs_util_print_map_column_values(char **cur,
+	char **end,
+	struct synx_map_entry *entry)
+{
+	if (synx_columns & STATUS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "\t\t%s", synx_debugfs_util_get_state_name
+		(synx_util_get_object_status(entry->synx_obj)));
+	if (synx_columns & ID_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "\t\t  %x", entry->key);
+	if (synx_columns & REF_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "\t  %d", kref_read(&entry->refcount));
+	if (synx_columns & BOUND_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "\t  %d", entry->synx_obj->num_bound_synxs);
+	if (synx_columns & GLOBAL_IDX_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "\t\t  %d", entry->synx_obj->global_idx);
+	if (synx_columns & MAP_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "\t\t  %d", entry->synx_obj->map_count);
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------");
+	SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------");
+	SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
+}
+
+void synx_debugfs_util_print_hash_table(char **cur,
+	char **end,
+	bool is_global)
+{
+	struct synx_map_entry *map_entry = NULL;
+	struct synx_coredata *synx_obj = NULL;
+	u32 key;
+
+	if (is_global)
+		SYNX_CONSOLE_LOG(*cur, *end,
+			"\n\t-------------GLOBAL MAP TABLE------------\n");
+	else
+		SYNX_CONSOLE_LOG(*cur, *end,
+			"\n\t-------------LOCAL MAP TABLE------------\n");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t\t");
+
+	if (synx_columns & STATUS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|  STATUS  |");
+	if (synx_columns & ID_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|  HANDLE  |");
+	if (synx_columns & REF_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|  REF CNT |");
+	if (synx_columns & BOUND_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "| NUM BOUND |");
+	if (synx_columns & GLOBAL_IDX_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "| GLOBAL INDEX |");
+	if (synx_columns & MAP_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|  MAP CNT |");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n");
+
+	for (key = lower_handle_id; key <= upper_handle_id; key++) {
+		map_entry = synx_util_get_map_entry(key);
+		if (IS_ERR_OR_NULL(map_entry) || IS_ERR_OR_NULL(map_entry->synx_obj) ||
+			(is_global ^ synx_util_is_global_handle(key))) {
+			synx_util_release_map_entry(map_entry);
+			continue;
+		}
+		synx_obj = map_entry->synx_obj;
+		synx_util_get_object(synx_obj);
+		mutex_lock(&synx_obj->obj_lock);
+		synx_debugfs_util_print_map_column_values(cur, end, map_entry);
+		mutex_unlock(&synx_obj->obj_lock);
+		synx_util_put_object(synx_obj);
+		synx_util_release_map_entry(map_entry);
+	}
+}
+
+void synx_debugfs_util_print_dma_fence(char **cur,
+	char **end)
+{
+	struct synx_fence_entry *curr = NULL;
+	struct hlist_node *tmp;
+	struct dma_fence *fence_entry = NULL;
+	u32 map_itr;
+
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------DMA FENCE MAP TABLE------------\n");
+
+	if (synx_columns & FENCE_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|         DMA FENCE           |");
+	if (synx_columns & STATUS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|   STATUS   |");
+	if (synx_columns & ID_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|         HANDLE          |");
+	if (synx_columns & REF_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|    REF CNT   |");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n");
+
+	spin_lock_bh(&synx_dev->native->fence_map_lock);
+	hash_for_each_safe(synx_dev->native->fence_map, map_itr, tmp, curr, node) {
+		if (IS_ERR_OR_NULL(curr))
+			continue;
+		fence_entry = (struct dma_fence *)curr->key;
+		dma_fence_get(fence_entry);
+		if (synx_debugfs_util_is_valid_dma_handle_range(curr)) {
+			if (synx_columns & FENCE_COLUMN)
+				SYNX_CONSOLE_LOG(*cur, *end, "\t%p", fence_entry);
+			if (synx_columns & STATUS_COLUMN)
+				SYNX_CONSOLE_LOG(*cur, *end, "\t\t%s",
+				synx_debugfs_util_get_state_name
+				(__fence_state(fence_entry, false)));
+			if (synx_columns & ID_COLUMN) {
+				SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr->g_handle);
+				SYNX_CONSOLE_LOG(*cur, *end, "\t%d", curr->l_handle);
+			}
+			if (synx_columns & REF_CNT_COLUMN)
+				SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d",
+				kref_read(&(fence_entry)->refcount));
+			SYNX_CONSOLE_LOG(*cur, *end,
+				"\n\t-------------------------------------");
+			SYNX_CONSOLE_LOG(*cur, *end,
+				"-----------------------------------------");
+			SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
+		}
+		dma_fence_put(fence_entry);
+	}
+	spin_unlock_bh(&synx_dev->native->fence_map_lock);
+}
+
+void synx_debugfs_util_print_csl_fence(char **cur,
+	char **end)
+{
+	u32 itr, rc = SYNX_SUCCESS;
+	struct synx_entry_64 *curr = NULL;
+	struct hlist_node *tmp;
+	struct synx_map_entry *map_entry = NULL;
+
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t------------- CSL FENCE MAP TABLE------------\n");
+
+	if (synx_columns & FENCE_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|           CSL FENCE        |");
+	if (synx_columns & STATUS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|   STATUS   |");
+	if (synx_columns & ID_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|    HANDLE   |");
+	if (synx_columns & REF_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|    REF CNT   |");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n");
+
+	spin_lock_bh(&synx_dev->native->csl_map_lock);
+	hash_for_each_safe(synx_dev->native->csl_fence_map, itr, tmp, curr, node) {
+		rc = synx_debugfs_util_get_CSL_fence_entry(curr);
+		if (rc) {
+			spin_unlock_bh(&synx_dev->native->csl_map_lock);
+			return;
+		}
+		if (curr->data[0] >= lower_handle_id && curr->data[0] <= upper_handle_id) {
+			if (synx_columns & FENCE_COLUMN)
+				SYNX_CONSOLE_LOG(*cur, *end, "%p", curr->key);
+			if (synx_columns & STATUS_COLUMN) {
+				map_entry = synx_util_get_map_entry(curr->data[0]);
+				if (!IS_ERR_OR_NULL(map_entry) &&
+					!IS_ERR_OR_NULL(map_entry->synx_obj)) {
+					SYNX_CONSOLE_LOG(*cur, *end, "\t%s",
+					synx_debugfs_util_get_state_name
+					(synx_util_get_object_status(map_entry->synx_obj)));
+					synx_util_release_map_entry(map_entry);
+				}
+			} //TODO : Update status field of CSL Fence with updated structure
+			if (synx_columns & ID_COLUMN)
+				SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr->data[0]);
+			if (synx_columns & REF_CNT_COLUMN)
+				SYNX_CONSOLE_LOG(*cur, *end, "\t%d", kref_read(&curr->refcount));
+			SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------");
+			SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------");
+			SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
+		}
+		synx_debugfs_util_put_CSL_fence_entry(curr);
+	}
+	spin_unlock_bh(&synx_dev->native->csl_map_lock);
+}
+
+void synx_debugfs_util_print_global_shared_memory(char **cur,
+	char **end)
+{
+	struct synx_global_coredata synx_global_entry;
+	u32 i, idx;
+
+	/* Column heading setup */
+	SYNX_CONSOLE_LOG(*cur, *end,
+				"\n\t  ------------- GLOBAL SHARED MEMORY ------------\n\t");
+
+	if (synx_columns & STATUS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|  STATUS  |");
+	if (synx_columns & ID_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|  HANDLE  |");
+	if (synx_columns & REF_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|  REF CNT |");
+	if (synx_columns & NUM_CHILD_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "| NUM CHILD |");
+	if (synx_columns & SUBSCRIBERS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "| SUBSCRIBERS |");
+	if (synx_columns & WAITERS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "| WAITERS |");
+	if (synx_columns & PARENTS_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|    PARENTS    |");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n");
+
+	for (idx = lower_handle_id ; idx <= upper_handle_id ; idx++) {
+		if (!synx_fetch_global_shared_memory_handle_details(idx, &synx_global_entry) ||
+		!synx_debugfs_util_is_valid_global_shared_memory_entry(&synx_global_entry, idx))
+			continue;
+		if (synx_columns & STATUS_COLUMN)
+			SYNX_CONSOLE_LOG(*cur, *end, "\t   %s",
+			synx_debugfs_util_get_state_name(synx_global_entry.status));
+		if (synx_columns & ID_COLUMN)
+			SYNX_CONSOLE_LOG(*cur, *end, "\t\t%x", synx_global_entry.handle);
+		if (synx_columns & REF_CNT_COLUMN)
+			SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.refcount);
+		if (synx_columns & NUM_CHILD_COLUMN)
+			SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.num_child);
+		if (synx_columns & SUBSCRIBERS_COLUMN)
+			SYNX_CONSOLE_LOG(*cur, *end, "\t%d", synx_global_entry.subscribers);
+		if (synx_columns & WAITERS_COLUMN)
+			SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", synx_global_entry.waiters);
+		if (synx_columns & PARENTS_COLUMN) {
+			for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+				if (synx_global_entry.parents[i])
+					SYNX_CONSOLE_LOG(*cur, *end, "   %2u",
+					synx_global_entry.parents[i]);
+			}
+		}
+		SYNX_CONSOLE_LOG(*cur, *end, "\n\t-------------------------------------");
+		SYNX_CONSOLE_LOG(*cur, *end, "-----------------------------------------");
+		SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
+	}
+}
+
+void synx_debugfs_util_print_client_table(char **cur,
+	char **end)
+{
+	u32 rc = SYNX_SUCCESS;
+	struct synx_client *curr;
+	struct hlist_node *tmp;
+	struct hlist_node *tmp2;
+	struct synx_handle_coredata *curr2 = NULL;
+	u32 client_map_itr, handle_map_itr;
+
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t ------------- CLIENT MAP TABLE------------\n");
+	if (synx_columns & CLIENT_ID_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "| CLIENT ID |");
+	if (synx_columns & REF_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|CLIENT REF COUNT|");
+	if (synx_columns & ID_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "| HANDLE ID |");
+	if (synx_columns & REF_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|REF COUNT|");
+	if (synx_columns & REL_CNT_COLUMN)
+		SYNX_CONSOLE_LOG(*cur, *end, "|REL COUNT|");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n");
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	hash_for_each_safe(synx_dev->native->client_metadata_map,
+		client_map_itr, tmp, curr, node) {
+		rc = synx_debugfs_util_get_client_data(curr);
+		if (rc)
+			goto bail;
+		spin_lock_bh(&curr->handle_map_lock);
+		hash_for_each_safe(curr->handle_map,
+			handle_map_itr, tmp2, curr2, node) {
+			rc = synx_debugfs_util_get_handle(curr2);
+			if (rc) {
+				spin_unlock_bh(&curr->handle_map_lock);
+				synx_debugfs_util_put_client_data(curr);
+				goto bail;
+			}
+			if (curr2->key >= lower_handle_id && curr2->key <= upper_handle_id) {
+				if (synx_columns & CLIENT_ID_COLUMN)
+					SYNX_CONSOLE_LOG(*cur, *end, "\t%u", curr->id);
+				if (synx_columns & REF_CNT_COLUMN)
+					SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d",
+					kref_read(&curr->refcount));
+				if (synx_columns & ID_COLUMN)
+					SYNX_CONSOLE_LOG(*cur, *end, "\t%d", curr2->key);
+				if (synx_columns & REF_CNT_COLUMN)
+					SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d",
+					kref_read(&curr2->refcount));
+				if (synx_columns & REL_CNT_COLUMN)
+					SYNX_CONSOLE_LOG(*cur, *end, "\t\t%d", curr2->rel_count);
+				SYNX_CONSOLE_LOG(*cur, *end,
+					"\n\t-------------------------------------");
+				SYNX_CONSOLE_LOG(*cur, *end,
+					"-----------------------------------------");
+				SYNX_CONSOLE_LOG(*cur, *end, "-----------\n");
+			}
+			synx_debugfs_util_put_handle(curr2);
+		}
+		spin_unlock_bh(&curr->handle_map_lock);
+		synx_debugfs_util_put_client_data(curr);
+	}
+bail:
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+}
+
+void synx_debugfs_util_load_help_content(char **cur,
+	char **end)
+{
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tSynx tables Supported for debugfs with the column names:");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tGLOBAL/LOCAL MAP COLUMNS : STATUS, ID, REF_CNT, BOUND,");
+	SYNX_CONSOLE_LOG(*cur, *end, "\tGLOBAL INDEX, MAP CNT\n");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tGLOBAL SHARED MEMORY COLUMNS : STATUS, ID,");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"REF_CNT, NUM_CHILD, \tSUBSCRIBERS, WAITERS, PARENTS");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tCLIENT MAP COLUMNS : CLIENT_ID, REF_CNT, REL_CNT, ID");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tDMA FENCE COLUMNS: STATUS, ID, REF_CNT, DMA FENCE");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tINSTRUCTIONS TO BE FOLLOWED:");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tTO PRINT CHOOSE THE COLUMNS ACCORDINGLY AND ADD UP THE");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\tHEXADECIMAL VALUES & PASS THE ADDED UP VALUES FOR COLUMN ALONG");
+	SYNX_CONSOLE_LOG(*cur, *end, "WITH TABLE SELECTION VALUE AS SHOWN BELOW:");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tSet Below Values for Column selection\n");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tNAME_COLUMN       = 0x0001");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tID_COLUMN         = 0x0002");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tBOUND_COLUMN      = 0x0004");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tSTATUS_COLUMN     = 0x0008");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tFENCE_COLUMN      = 0x0010");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tCOREDATA_COLUMN   = 0x0020");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_IDX_COLUMN = 0x0040");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tREL_CNT_COLUMN    = 0x0080");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tMAP_CNT_COLUMN    = 0x0100");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tREF_CNT_COLUMN    = 0x0200");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tNUM_CHILD_COLUMN  = 0x0400");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tSUBSCRIBERS_COLUMN= 0x0800");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tWAITERS_COLUMN    = 0x1000");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tPARENTS_COLUMN    = 0x2000");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tCLIENT_ID_COLUMN  = 0x4000");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tSet Below Values for Table selection\n");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tLOCAL_HASHTABLE   = 0x00010000");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_HASHTABLE  = 0x00020000");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tCLIENT_HASHTABLE  = 0x00040000");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tGLOBAL_SHARED_MEM = 0x00080000");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tDMA_FENCE_MAP     = 0x00100000\n");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tExample : To select Global map & all its columns :");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t echo 0x2034E>column_level");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t Last four digits in hexadecimal flag");
+	SYNX_CONSOLE_LOG(*cur, *end, " is dedicated for setting columns,");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\tuser can even set \"FFFF\" to set all columns");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\t Instead of passing 0x2034E, \tuser can even pass");
+	SYNX_CONSOLE_LOG(*cur, *end, " 0x2FFFF to fetch the same");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tUser can set Handle Range with echo command as shown below\n");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\techo 1048577-1048580>synx_table");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tFor single handle : echo \"1048577\">synx_table");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tHandle range can be set in hexadecimal values as shown below:");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\techo 0x100001-10000f>synx_table");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tSingle handle : echo 0x100001>synx_table");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\n\tTo print info on console : cat synx_table");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tHandle states :");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tACT : SYNX_STATE_ACTIVE");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tINV : SYNX_STATE_INVALID");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tERR : SYNX_STATE_SIGNALED_ERROR");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tSUC : SYNX_STATE_SIGNALED_SUCCESS");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tCAN : SYNX_STATE_SIGNALED_CANCELLED");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tEXT : SYNX_STATE_SIGNALED_EXTERNAL");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tSSR : SYNX_STATE_SIGNALED_SSR\n");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tCUS : CUSTOM SIGNAL");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\t??? : UNKNOWN / UNDEFINED");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\n\tAdditional information:");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tNo need to set handle ID range and column or table selection");
+	SYNX_CONSOLE_LOG(*cur, *end, "\tvalues again if once it is already set");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tSimply using cat synx_table command user can print the data");
+	SYNX_CONSOLE_LOG(*cur, *end, "\tfor same table with same set of columns");
+	SYNX_CONSOLE_LOG(*cur, *end, "\n\tTo print all tables and all");
+	SYNX_CONSOLE_LOG(*cur, *end, "columns set column level value to 0x1fffff");
+	SYNX_CONSOLE_LOG(*cur, *end,
+		"\n\tCurrently we do not support CSL fence\n\n");
+}

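One worked example for the column_level value 0x2034E quoted in the help text above: it is GLOBAL_HASHTABLE (0x20000) plus ID_COLUMN (0x2), BOUND_COLUMN (0x4), STATUS_COLUMN (0x8), GLOBAL_IDX_COLUMN (0x40), MAP_CNT_COLUMN (0x100) and REF_CNT_COLUMN (0x200), i.e. 0x20000 + 0x34E. That is exactly the set of columns synx_debugfs_util_print_map_column_values() knows how to print for the global map, which is why the help text offers it as the "all columns" choice for that table.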
+ 39 - 0
qcom/opensource/synx-kernel/msm/synx/synx_debugfs_util.h

@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_DEBUGFS_UTIL_H__
+#define __SYNX_DEBUGFS_UTIL_H__
+
+#include "synx_api.h"
+#include "synx_private.h"
+
+#define GLOBAL_HANDLE_STARTING_ID (1048577)
+
+/* DMA FENCE print function */
+void synx_debugfs_util_print_dma_fence(char **cur, char **end);
+
+/* CSL FENCE print function */
+void synx_debugfs_util_print_csl_fence(char **cur, char **end);
+
+/* GLOBAL & LOCAL MAP print function */
+void synx_debugfs_util_print_hash_table(char **cur, char **end, bool flag);
+
+/* GLOBAL SHARED MEMORY print function */
+void synx_debugfs_util_print_global_shared_memory(char **cur, char **end);
+
+/* CLIENT MAP print function */
+void synx_debugfs_util_print_client_table(char **cur, char **end);
+
+/* Function to get SYNX State Name */
+char *synx_debugfs_util_get_state_name(u32 status);
+
+/* Function for loading content of the help option for debugfs v2 */
+void synx_debugfs_util_load_help_content(char **cur, char **end);
+
+/* Function to check entry of the global shared memory is valid or not */
+bool synx_debugfs_util_is_valid_global_shared_memory_entry(struct synx_global_coredata *entry,
+u32 idx);
+
+#endif /* __SYNX_DEBUGFS_UTIL_H__ */

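Note that GLOBAL_HANDLE_STARTING_ID, 1048577, is 0x100001 in hexadecimal: one past the 2^20 (0x100000) base, consistent with the zero index being reserved elsewhere in the driver, and the same value used in the echo examples of the debugfs help text above.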
+ 27 - 0
qcom/opensource/synx-kernel/msm/synx/synx_err.h

@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_ERR_H__
+#define __SYNX_ERR_H__
+
+#include <linux/err.h>
+
+/**
+ * Error codes returned from framework
+ *
+ * Return codes are mapped to platform specific
+ * return values.
+ */
+#define SYNX_SUCCESS   0
+#define SYNX_NOMEM     ENOMEM
+#define SYNX_NOSUPPORT EOPNOTSUPP
+#define SYNX_NOPERM    EPERM
+#define SYNX_TIMEOUT   ETIMEDOUT
+#define SYNX_ALREADY   EALREADY
+#define SYNX_NOENT     ENOENT
+#define SYNX_INVALID   EINVAL
+#define SYNX_BUSY      EBUSY
+
+#endif /* __SYNX_ERR_H__ */

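Because each SYNX_* code above aliases a positive errno value, framework calls return their negation and can be tested like ordinary kernel error codes. A hypothetical caller, borrowing synx_create() from synx_api.h purely for illustration:

#include <linux/printk.h>
#include "synx_api.h"	/* pulls in synx_err.h */

static int demo_create_checked(struct synx_session *session,
			       struct synx_create_params *params)
{
	int rc = synx_create(session, params);

	if (rc == -SYNX_NOMEM)		/* identical to rc == -ENOMEM */
		pr_err("synx: out of memory\n");
	else if (rc == -SYNX_INVALID)	/* identical to rc == -EINVAL */
		pr_err("synx: invalid arguments\n");
	return rc;
}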
+ 916 - 0
qcom/opensource/synx-kernel/msm/synx/synx_global.c

@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/string.h>
+#include "synx_debugfs.h"
+#include "synx_global.h"
+
+static struct synx_shared_mem synx_gmem;
+static struct hwspinlock *synx_hwlock;
+
+static u32 synx_gmem_lock_owner(u32 idx)
+{
+	/*
+	 * subscribers field of global table index 0 is used to
+	 * maintain synx gmem lock owner data.
+	 * core updates the field after acquiring the lock and
+	 * before releasing the lock appropriately.
+	 */
+	return synx_gmem.table[0].subscribers;
+}
+
+static void synx_gmem_lock_owner_set(u32 idx)
+{
+	synx_gmem.table[0].subscribers = SYNX_CORE_APSS;
+}
+
+static void synx_gmem_lock_owner_clear(u32 idx)
+{
+	if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS)
+		dprintk(SYNX_WARN, "reset lock owned by core %u\n",
+			synx_gmem.table[0].subscribers);
+
+	synx_gmem.table[0].subscribers = SYNX_CORE_MAX;
+}
+
+static int synx_gmem_lock(u32 idx, unsigned long *flags)
+{
+	int rc;
+
+	if (!synx_hwlock)
+		return -SYNX_INVALID;
+
+	rc = hwspin_lock_timeout_irqsave(
+		synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags);
+	if (!rc)
+		synx_gmem_lock_owner_set(idx);
+
+	return rc;
+}
+
+static void synx_gmem_unlock(u32 idx, unsigned long *flags)
+{
+	synx_gmem_lock_owner_clear(idx);
+	hwspin_unlock_irqrestore(synx_hwlock, flags);
+}
+
+static void synx_global_print_data(
+	struct synx_global_coredata *synx_g_obj,
+	const char *func)
+{
+	int i = 0;
+
+	dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u",
+		func, synx_g_obj->status,
+		synx_g_obj->handle, synx_g_obj->refcount);
+
+	dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u",
+		func, synx_g_obj->subscribers, synx_g_obj->waiters,
+		synx_g_obj->num_child);
+
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
+		if (synx_g_obj->parents[i])
+			dprintk(SYNX_VERB, "%s: parents %u:%u",
+				func, i, synx_g_obj->parents[i]);
+}
+
+bool synx_fetch_global_shared_memory_handle_details(u32 synx_handle,
+		struct synx_global_coredata *synx_global_entry)
+{
+	int rc = SYNX_SUCCESS;
+	u32 idx;
+	unsigned long flags;
+	struct synx_global_coredata *entry;
+
+	if (!synx_gmem.table) {
+		dprintk(SYNX_VERB, "synx_gmem is NULL\n");
+		return false;
+	}
+	idx = synx_handle & SYNX_HANDLE_INDEX_MASK;
+	if (!synx_is_valid_idx(idx))
+		return false;
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc) {
+		dprintk(SYNX_VERB, "Failed to lock entry %d\n", idx);
+		return false;
+	}
+	entry = &synx_gmem.table[idx];
+	memcpy(synx_global_entry, entry, sizeof(struct synx_global_coredata));
+	synx_gmem_unlock(idx, &flags);
+
+	return true;
+}
+
+int synx_global_dump_shared_memory(void)
+{
+	int rc = SYNX_SUCCESS, idx;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_INVALID;
+
+	/* Print bitmap memory */
+	for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) {
+		rc = synx_gmem_lock(idx, &flags);
+
+		if (rc)
+			return rc;
+
+		dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d",
+		__func__, idx, synx_gmem.bitmap[idx]);
+
+		synx_gmem_unlock(idx, &flags);
+	}
+
+	/* Print table memory */
+	for (idx = 0;
+		idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT;
+		idx++) {
+		rc = synx_gmem_lock(idx, &flags);
+
+		if (rc)
+			return rc;
+
+		dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx);
+
+		synx_g_obj = &synx_gmem.table[idx];
+		synx_global_print_data(synx_g_obj, __func__);
+
+		synx_gmem_unlock(idx, &flags);
+	}
+	return rc;
+}
+
+static int synx_gmem_init(void)
+{
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID);
+	if (!synx_hwlock) {
+		dprintk(SYNX_ERR, "hwspinlock request failed\n");
+		return -SYNX_NOMEM;
+	}
+
+	/* zero idx not allocated for clients */
+	ipclite_global_test_and_set_bit(0,
+		(ipclite_atomic_uint32_t *)synx_gmem.bitmap);
+	memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata));
+
+	return SYNX_SUCCESS;
+}
+
+u32 synx_global_map_core_id(enum synx_core_id id)
+{
+	u32 host_id;
+
+	switch (id) {
+	case SYNX_CORE_APSS:
+		host_id = IPCMEM_APPS; break;
+	case SYNX_CORE_NSP:
+		host_id = IPCMEM_CDSP; break;
+	case SYNX_CORE_IRIS:
+		host_id = IPCMEM_VPU; break;
+	case SYNX_CORE_EVA:
+		host_id = IPCMEM_CVP; break;
+	case SYNX_CORE_ICP:
+		host_id = IPCMEM_CAM; break;
+	default:
+		host_id = IPCMEM_NUM_HOSTS;
+		dprintk(SYNX_ERR, "invalid core id\n");
+	}
+
+	return host_id;
+}
+
+int synx_global_alloc_index(u32 *idx)
+{
+	int rc = SYNX_SUCCESS;
+	u32 prev, index;
+	const u32 size = SYNX_GLOBAL_MAX_OBJS;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (IS_ERR_OR_NULL(idx))
+		return -SYNX_INVALID;
+
+	do {
+		index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size);
+		if (index >= size) {
+			rc = -SYNX_NOMEM;
+			break;
+		}
+		prev = ipclite_global_test_and_set_bit(index % 32,
+				(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index/32));
+		if ((prev & (1UL << (index % 32))) == 0) {
+			*idx = index;
+			dprintk(SYNX_MEM, "allocated global idx %u\n", *idx);
+			break;
+		}
+	} while (true);
+
+	return rc;
+}
+
+int synx_global_init_coredata(u32 h_synx)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	if (synx_g_obj->status != 0 || synx_g_obj->refcount != 0 ||
+		synx_g_obj->subscribers != 0 || synx_g_obj->handle != 0 ||
+		synx_g_obj->parents[0] != 0) {
+		dprintk(SYNX_ERR,
+				"entry not cleared for idx %u,\n"
+				"synx_g_obj->status %d,\n"
+				"synx_g_obj->refcount %d,\n"
+				"synx_g_obj->subscribers %d,\n"
+				"synx_g_obj->handle %u,\n"
+				"synx_g_obj->parents[0] %d\n",
+				idx, synx_g_obj->status,
+				synx_g_obj->refcount,
+				synx_g_obj->subscribers,
+				synx_g_obj->handle,
+				synx_g_obj->parents[0]);
+		synx_gmem_unlock(idx, &flags);
+		return -SYNX_INVALID;
+	}
+	memset(synx_g_obj, 0, sizeof(*synx_g_obj));
+	/* set status to active */
+	synx_g_obj->status = SYNX_STATE_ACTIVE;
+	synx_g_obj->refcount = 1;
+	synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS);
+	synx_g_obj->handle = h_synx;
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_global_get_waiting_cores_locked(
+	struct synx_global_coredata *synx_g_obj,
+	bool *cores)
+{
+	int i;
+
+	synx_global_print_data(synx_g_obj, __func__);
+	for (i = 0; i < SYNX_CORE_MAX; i++) {
+		if (synx_g_obj->waiters & (1UL << i)) {
+			cores[i] = true;
+			dprintk(SYNX_VERB,
+				"waiting for handle %u/n",
+				synx_g_obj->handle);
+		}
+	}
+
+	/* clear waiter list so signals are not repeated */
+	synx_g_obj->waiters = 0;
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_get_waiting_cores(u32 idx, bool *cores)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_get_waiting_cores_locked(synx_g_obj, cores);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_set_waiting_core(u32 idx, enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->waiters |= (1UL << id);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_get_subscribed_cores(u32 idx, bool *cores)
+{
+	int i;
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	for (i = 0; i < SYNX_CORE_MAX; i++)
+		if (synx_g_obj->subscribers & (1UL << i))
+			cores[i] = true;
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_fetch_handle_details(u32 idx, u32 *h_synx)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (IS_ERR_OR_NULL(h_synx) || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	*h_synx = synx_g_obj->handle;
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->subscribers |= (1UL << id);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->subscribers &= ~(1UL << id);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+u32 synx_global_get_parents_num(u32 idx)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	u32 i, count = 0;
+
+	if (!synx_gmem.table)
+		return 0;
+
+	if (!synx_is_valid_idx(idx))
+		return 0;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return 0;
+	synx_g_obj = &synx_gmem.table[idx];
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+		if (synx_g_obj->parents[i] != 0)
+			count++;
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	return count;
+}
+
+static int synx_global_get_parents_locked(
+	struct synx_global_coredata *synx_g_obj, u32 *parents)
+{
+	u32 i;
+
+	if (!synx_g_obj || !parents)
+		return -SYNX_NOMEM;
+
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
+		parents[i] = synx_g_obj->parents[i];
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_get_parents(u32 idx, u32 *parents)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table || !parents)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	rc = synx_global_get_parents_locked(synx_g_obj, parents);
+	synx_gmem_unlock(idx, &flags);
+
+	return rc;
+}
+
+u32 synx_global_get_status(u32 idx)
+{
+	int rc;
+	unsigned long flags;
+	u32 status = SYNX_STATE_ACTIVE;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return 0;
+
+	if (!synx_is_valid_idx(idx))
+		return 0;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return 0;
+	synx_g_obj = &synx_gmem.table[idx];
+	if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child == 0)
+		status = synx_g_obj->status;
+	synx_gmem_unlock(idx, &flags);
+
+	return status;
+}
+
+u32 synx_global_test_status_set_wait(u32 idx,
+	enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	u32 status;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return 0;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return 0;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return 0;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_print_data(synx_g_obj, __func__);
+	status = synx_g_obj->status;
+	/* if handle is still ACTIVE */
+	if (status == SYNX_STATE_ACTIVE || synx_g_obj->num_child != 0) {
+		synx_g_obj->waiters |= (1UL << id);
+		status = SYNX_STATE_ACTIVE;
+	} else {
+		dprintk(SYNX_DBG, "handle %u already signaled %u\n",
+			synx_g_obj->handle, synx_g_obj->status);
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	return status;
+}
+
+static int synx_global_update_status_core(u32 idx,
+	u32 status)
+{
+	u32 i, p_idx;
+	int rc;
+	bool clear = false;
+	unsigned long flags;
+	uint64_t data;
+	struct synx_global_coredata *synx_g_obj;
+	u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0};
+	bool wait_cores[SYNX_CORE_MAX] = {false};
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_print_data(synx_g_obj, __func__);
+	/* prepare for cross core signaling */
+	data = synx_g_obj->handle;
+	data <<= 32;
+	if (synx_g_obj->num_child != 0) {
+		/* composite handle */
+		synx_g_obj->num_child--;
+		if (synx_g_obj->status == SYNX_STATE_ACTIVE ||
+			(status > SYNX_STATE_SIGNALED_SUCCESS &&
+			status <= SYNX_STATE_SIGNALED_MAX))
+			synx_g_obj->status = status;
+
+		if (synx_g_obj->num_child == 0) {
+			data |= synx_g_obj->status;
+			synx_global_get_waiting_cores_locked(synx_g_obj,
+				wait_cores);
+			synx_global_get_parents_locked(synx_g_obj, h_parents);
+
+			/* release ref held by constituting handles */
+			synx_g_obj->refcount--;
+			if (synx_g_obj->refcount == 0) {
+				memset(synx_g_obj, 0,
+					sizeof(*synx_g_obj));
+				clear = true;
+			}
+		} else {
+			/* pending notification from child handles */
+			data = 0;
+			dprintk(SYNX_DBG,
+				"Child notified parent handle %u, pending %u\n",
+				synx_g_obj->handle, synx_g_obj->num_child);
+		}
+	} else {
+		synx_g_obj->status = status;
+		data |= synx_g_obj->status;
+		synx_global_get_waiting_cores_locked(synx_g_obj,
+			wait_cores);
+		synx_global_get_parents_locked(synx_g_obj, h_parents);
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	if (clear) {
+		ipclite_global_test_and_clear_bit(idx%32,
+			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
+		dprintk(SYNX_MEM,
+			"cleared global idx %u\n", idx);
+	}
+
+	/* notify waiting clients on signal */
+	if (data) {
+		/*
+		 * In case of SSR, someone might be waiting on the same
+		 * core. In other cases, the synx_signal API takes care of
+		 * signaling handles on the same core, so no interrupt
+		 * needs to be sent.
+		 */
+		if (status == SYNX_STATE_SIGNALED_SSR)
+			i = 0;
+		else
+			i = 1;
+
+		for (; i < SYNX_CORE_MAX; i++) {
+			if (!wait_cores[i])
+				continue;
+			/* use data; synx_g_obj may be cleared after unlock */
+			dprintk(SYNX_DBG,
+				"invoking ipc signal handle %u, status %u\n",
+				(u32)(data >> 32), (u32)data);
+			if (ipclite_msg_send(
+				synx_global_map_core_id(i),
+				data))
+				dprintk(SYNX_ERR,
+					"ipc signaling %llu to core %u failed\n",
+					data, i);
+		}
+	}
+
+	/* handle parent notifications */
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+		p_idx = h_parents[i];
+		if (p_idx == 0)
+			continue;
+		synx_global_update_status_core(p_idx, status);
+	}
+
+	return SYNX_SUCCESS;
+}
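+
+/*
+ * Propagation sketch for the recursion above: signaling a child entry
+ * walks its parents[] list and decrements each composite parent's
+ * num_child; a parent is itself treated as signaled (waiters notified
+ * over IPC, references dropped) only once its num_child reaches zero.
+ */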
+
+int synx_global_update_status(u32 idx, u32 status)
+{
+	int rc = -SYNX_INVALID;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE)
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	if (synx_g_obj->num_child != 0) {
+		/* composite handle cannot be signaled */
+		goto fail;
+	} else if (synx_g_obj->status != SYNX_STATE_ACTIVE) {
+		rc = -SYNX_ALREADY;
+		goto fail;
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	return synx_global_update_status_core(idx, status);
+
+fail:
+	synx_gmem_unlock(idx, &flags);
+	return rc;
+}
+
+int synx_global_get_ref(u32 idx)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_print_data(synx_g_obj, __func__);
+	if (synx_g_obj->handle && synx_g_obj->refcount)
+		synx_g_obj->refcount++;
+	else
+		rc = -SYNX_NOENT;
+	synx_gmem_unlock(idx, &flags);
+
+	return rc;
+}
+
+void synx_global_put_ref(u32 idx)
+{
+	int rc;
+	bool clear = false;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return;
+
+	if (!synx_is_valid_idx(idx))
+		return;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->refcount--;
+	if (synx_g_obj->refcount == 0) {
+		memset(synx_g_obj, 0, sizeof(*synx_g_obj));
+		clear = true;
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	if (clear) {
+		ipclite_global_test_and_clear_bit(idx%32,
+			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
+		dprintk(SYNX_MEM, "cleared global idx %u\n", idx);
+	}
+}
+
+int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
+{
+	int rc = -SYNX_INVALID;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	u32 i, j = 0;
+	u32 idx;
+	u32 num_child = 0;
+	u32 parent_status = SYNX_STATE_ACTIVE;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(p_idx))
+		return -SYNX_INVALID;
+
+	if (num_list == 0)
+		return SYNX_SUCCESS;
+
+	while (j < num_list) {
+		idx = idx_list[j];
+
+		if (!synx_is_valid_idx(idx))
+			goto fail;
+
+		rc = synx_gmem_lock(idx, &flags);
+		if (rc)
+			goto fail;
+
+		synx_g_obj = &synx_gmem.table[idx];
+		for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+			if (synx_g_obj->parents[i] == 0) {
+				synx_g_obj->parents[i] = p_idx;
+				break;
+			}
+		}
+		if (synx_g_obj->status == SYNX_STATE_ACTIVE)
+			num_child++;
+		else if (synx_g_obj->status >
+			SYNX_STATE_SIGNALED_SUCCESS &&
+			synx_g_obj->status <= SYNX_STATE_SIGNALED_MAX)
+			parent_status = synx_g_obj->status;
+		else if (parent_status == SYNX_STATE_ACTIVE)
+			parent_status = synx_g_obj->status;
+
+		if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child != 0)
+			num_child++;
+
+		dprintk(SYNX_MEM, "synx_obj->status %d parent status %d\n",
+			synx_g_obj->status, parent_status);
+		synx_gmem_unlock(idx, &flags);
+
+		if (i >= SYNX_GLOBAL_MAX_PARENTS) {
+			rc = -SYNX_NOMEM;
+			goto fail;
+		}
+
+		j++;
+	}
+
+	rc = synx_gmem_lock(p_idx, &flags);
+	if (rc)
+		goto fail;
+	synx_g_obj = &synx_gmem.table[p_idx];
+	synx_g_obj->num_child += num_child;
+	if (synx_g_obj->num_child != 0)
+		synx_g_obj->refcount++;
+	synx_g_obj->status = parent_status;
+	synx_global_print_data(synx_g_obj, __func__);
+	synx_gmem_unlock(p_idx, &flags);
+
+	return SYNX_SUCCESS;
+
+fail:
+	/* undo parent linkage on all entries processed so far */
+	while (j--) {
+		idx = idx_list[j];
+
+		if (synx_gmem_lock(idx, &flags))
+			continue;
+		synx_g_obj = &synx_gmem.table[idx];
+		for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+			if (synx_g_obj->parents[i] == p_idx) {
+				synx_g_obj->parents[i] = 0;
+				break;
+			}
+		}
+		synx_gmem_unlock(idx, &flags);
+	}
+
+	return rc;
+}
+
+int synx_global_recover(enum synx_core_id core_id)
+{
+	int rc = SYNX_SUCCESS;
+	u32 idx = 0;
+	const u32 size = SYNX_GLOBAL_MAX_OBJS;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	bool update;
+	int *clear_idx = NULL;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	clear_idx = kzalloc(sizeof(int)*SYNX_GLOBAL_MAX_OBJS, GFP_KERNEL);
+	if (!clear_idx)
+		return -SYNX_NOMEM;
+
+	ipclite_recover(synx_global_map_core_id(core_id));
+
+	/* recover synx gmem lock if it was owned by core in ssr */
+	if (synx_gmem_lock_owner(0) == core_id) {
+		synx_gmem_lock_owner_clear(0);
+		hwspin_unlock_raw(synx_hwlock);
+	}
+
+	idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
+			size, idx + 1);
+	while (idx < size) {
+		update = false;
+		rc = synx_gmem_lock(idx, &flags);
+		if (rc)
+			goto free;
+		synx_g_obj = &synx_gmem.table[idx];
+		if (synx_g_obj->refcount &&
+			 synx_g_obj->subscribers & (1UL << core_id)) {
+			synx_g_obj->subscribers &= ~(1UL << core_id);
+			synx_g_obj->refcount--;
+			if (synx_g_obj->refcount == 0) {
+				memset(synx_g_obj, 0, sizeof(*synx_g_obj));
+				clear_idx[idx] = 1;
+			} else if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
+				update = true;
+			}
+		}
+		synx_gmem_unlock(idx, &flags);
+		if (update)
+			synx_global_update_status(idx,
+				SYNX_STATE_SIGNALED_SSR);
+		idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
+				size, idx + 1);
+	}
+
+	for (idx = 1; idx < size; idx++) {
+		if (clear_idx[idx]) {
+			ipclite_global_test_and_clear_bit(idx % 32,
+				(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
+			dprintk(SYNX_MEM, "released global idx %u\n", idx);
+		}
+	}
+free:
+	kfree(clear_idx);
+
+	return rc;
+}
+
+int synx_global_mem_init(void)
+{
+	int rc;
+	int bitmap_size = SYNX_GLOBAL_MAX_OBJS/32;
+	struct global_region_info mem_info;
+
+	rc = get_global_partition_info(&mem_info);
+	if (rc) {
+		dprintk(SYNX_ERR, "error setting up global shared memory\n");
+		return rc;
+	}
+
+	memset(mem_info.virt_base, 0, mem_info.size);
+	dprintk(SYNX_DBG, "global shared memory %pK size %u\n",
+		mem_info.virt_base, mem_info.size);
+
+	synx_gmem.bitmap = (u32 *)mem_info.virt_base;
+	synx_gmem.locks = synx_gmem.bitmap + bitmap_size;
+	synx_gmem.table =
+		(struct synx_global_coredata *)(synx_gmem.locks + 2);
+	dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n",
+		synx_gmem.bitmap, synx_gmem.table);
+
+	return synx_gmem_init();
+}

+ 305 - 0
qcom/opensource/synx-kernel/msm/synx/synx_global.h

@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_SHARED_MEM_H__
+#define __SYNX_SHARED_MEM_H__
+
+#include "synx_err.h"
+#include "ipclite_client.h"
+
+#include <synx_header.h>
+/**
+ * enum synx_core_id - Synx core IDs
+ *
+ * SYNX_CORE_APSS : APSS core
+ * SYNX_CORE_NSP  : NSP core
+ * SYNX_CORE_EVA  : EVA core
+ * SYNX_CORE_IRIS : IRIS core
+ * SYNX_CORE_ICP  : ICP core
+ */
+enum synx_core_id {
+	SYNX_CORE_APSS = 0,
+	SYNX_CORE_NSP,
+	SYNX_CORE_EVA,
+	SYNX_CORE_IRIS,
+	SYNX_CORE_ICP,
+	SYNX_CORE_MAX,
+};
+
+/* synx handle encoding */
+#define SYNX_HANDLE_INDEX_BITS         16
+#define SYNX_HANDLE_CORE_BITS          4
+#define SYNX_HANDLE_GLOBAL_FLAG_BIT    1
+
+#define SYNX_GLOBAL_SHARED_LOCKS       1
+#define SYNX_GLOBAL_MAX_OBJS           4096
+#define SYNX_GLOBAL_MAX_PARENTS        4
+
+#define SYNX_HANDLE_INDEX_MASK         ((1UL<<SYNX_HANDLE_INDEX_BITS)-1)
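+
+/*
+ * Handle layout sketch, derived from the bit widths above: bits [15:0]
+ * hold the global table index, bits [19:16] the creating core id, and
+ * bit 20 the global flag. E.g. a global handle created on
+ * SYNX_CORE_APSS (core id 0) for index 42 encodes to
+ * (1 << 20) | (0 << 16) | 42 = 0x10002A.
+ */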
+
+#define SHRD_MEM_DUMP_NUM_BMAP_WORDS   10
+#define NUM_CHAR_BIT                   8
+
+/* spin lock timeout (ms) */
+#define SYNX_HWSPIN_TIMEOUT            500
+#define SYNX_HWSPIN_ID                 10
+
+/* dma fence states */
+#define SYNX_DMA_FENCE_STATE_MAX             4096
+
+/**
+ * struct synx_global_coredata - Synx global object, used for book keeping
+ * of all metadata associated with each individual global entry
+ *
+ * @status      : Synx signaling status
+ * @handle      : Handle of global entry
+ * @refcount    : References owned by each core
+ * @num_child   : Count of children pending signal (for composite handle)
+ * @subscribers : Cores owning reference on this object
+ * @waiters     : Cores waiting for notification
+ * @parents     : Composite global coredata index of parent entities
+ *                Can be part of SYNX_GLOBAL_MAX_PARENTS composite entries.
+ */
+struct synx_global_coredata {
+	u32 status;
+	u32 handle;
+	u16 refcount;
+	u16 num_child;
+	u16 subscribers;
+	u16 waiters;
+	u16 parents[SYNX_GLOBAL_MAX_PARENTS];
+};
+
+/**
+ * struct synx_shared_mem - Synx global shared memory descriptor
+ *
+ * @bitmap : Bitmap for allocating entries from the table
+ * @locks  : Array of locks for exclusive access to table entries
+ * @table  : Array of Synx global entries
+ */
+struct synx_shared_mem {
+	u32 *bitmap;
+	u32 *locks;
+	struct synx_global_coredata *table;
+};
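+
+/*
+ * Shared-memory layout sketch (as carved out in synx_global_mem_init):
+ * the region starts with the allocation bitmap
+ * (SYNX_GLOBAL_MAX_OBJS / 32 u32 words), followed by the lock words,
+ * followed by the array of synx_global_coredata entries.
+ */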
+
+static inline bool synx_is_valid_idx(u32 idx)
+{
+	if (idx < SYNX_GLOBAL_MAX_OBJS)
+		return true;
+	return false;
+}
+
+/**
+ * synx_global_mem_init - Initialize global shared memory
+ *
+ * @return Zero on success, negative error on failure.
+ */
+int synx_global_mem_init(void);
+
+/**
+ * synx_global_map_core_id - Map Synx core ID to IPC Lite host
+ *
+ * @param id : Core Id to map
+ *
+ * @return IPC host ID.
+ */
+u32 synx_global_map_core_id(enum synx_core_id id);
+
+/**
+ * synx_global_alloc_index - Allocate new global entry
+ *
+ * @param idx : Pointer to global table index (filled by function)
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_alloc_index(u32 *idx);
+
+/**
+ * synx_global_init_coredata - Allocate new global entry
+ *
+ * @param h_synx : Synx global handle
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_init_coredata(u32 h_synx);
+
+/**
+ * synx_global_get_waiting_cores - Get list of all the waiting cores on a global entry
+ *
+ * Fills the cores array with true if the core is waiting, and
+ * false if not. Indexed through enum synx_core_id.
+ *
+ * @param idx   : Global entry index
+ * @param cores : Array of boolean variables, one each for supported core.
+ *                Array should contain SYNX_CORE_MAX entries.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_waiting_cores(u32 idx, bool *cores);
+
+/**
+ * synx_global_set_waiting_core - Set core as a waiting core on global entry
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be set as waiter
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_set_waiting_core(u32 idx, enum synx_core_id id);
+
+/**
+ * synx_global_get_subscribed_cores - Get list of all the subscribed cores on a global entry
+ *
+ * Fills the cores array with true if the core is subscribed, and
+ * false if not. Indexed through enum synx_core_id.
+ *
+ * @param idx   : Global entry index
+ * @param cores : Array of boolean variables, one each for supported core.
+ *                Array should contain SYNX_CORE_MAX entries.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_subscribed_cores(u32 idx, bool *cores);
+
+/**
+ * synx_global_set_subscribed_core - Set core as a subscriber core on global entry
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be added as subscriber
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id);
+
+/**
+ * synx_global_clear_subscribed_core - Clear core as a subscriber core on global entry
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be removed as subscriber
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id);
+
+/**
+ * synx_global_get_status - Get status of the global entry
+ *
+ * @param idx : Global entry index
+ *
+ * @return Global entry status
+ */
+u32 synx_global_get_status(u32 idx);
+
+/**
+ * synx_global_test_status_set_wait - Check status and add core as waiter if not signaled
+ *
+ * Tests the status and adds the waiter in one atomic operation.
+ * Doing the status check and the waiter update as two separate
+ * operations could miss the IPC signal if the signal arrives
+ * between the two.
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be set as waiter (if unsignaled)
+ *
+ * @return Status of global entry idx.
+ */
+u32 synx_global_test_status_set_wait(u32 idx,
+	enum synx_core_id id);
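+
+/*
+ * A minimal wait-path sketch using the helper above (hypothetical
+ * caller, not part of this driver; the two helpers named below are
+ * assumptions for illustration only):
+ *
+ *	u32 status;
+ *
+ *	status = synx_global_test_status_set_wait(idx, SYNX_CORE_APSS);
+ *	if (status == SYNX_STATE_ACTIVE)
+ *		wait_for_ipc_notification(idx);   // hypothetical helper
+ *	else
+ *		handle_signaled_status(status);   // hypothetical helper
+ */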
+
+/**
+ * synx_global_update_status - Update status of the global entry
+ *
+ * Function also updates the parent composite handles
+ * about the signaling.
+ *
+ * @param idx    : Global entry index
+ * @param status : Signaling status
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_update_status(u32 idx, u32 status);
+
+/**
+ * synx_global_get_ref - Get additional reference on global entry
+ *
+ * @param idx : Global entry index
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_ref(u32 idx);
+
+/**
+ * synx_global_put_ref - Release reference on global entry
+ *
+ * @param idx : Global entry index
+ */
+void synx_global_put_ref(u32 idx);
+
+/**
+ * synx_global_get_parents - Get the global entry index of all composite parents
+ *
+ * @param idx     : Global entry index whose parents are requested
+ * @param parents : Array of global entry index of composite handles
+ *                  Filled by the function. Array should contain at least
+ *                  SYNX_GLOBAL_MAX_PARENTS entries.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_parents(u32 idx, u32 *parents);
+
+/**
+ * synx_global_merge - Merge handles to form global handle
+ *
+ * Links each constituent global entry to the composite parent entry.
+ *
+ * @param idx_list : List of global indexes to merge
+ * @param num_list : Number of handles in the list to merge
+ * @param p_idx   : Global entry index allocated for composite handle
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx);
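+
+/*
+ * Merge usage sketch (hypothetical indices): after allocating a
+ * composite entry p_idx, link two constituent entries to it:
+ *
+ *	u32 children[2] = { idx_a, idx_b };
+ *	int rc = synx_global_merge(children, 2, p_idx);
+ *
+ * On success each child records p_idx in its parents[] array, and the
+ * parent's num_child tracks the children still pending signal.
+ */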
+
+/**
+ * synx_global_recover - Recover handles subscribed by specific core
+ *
+ * @param id : Core ID to clean up
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_recover(enum synx_core_id id);
+
+/**
+ * synx_global_clean_cdsp_mem - Release handles created/used by CDSP
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_clean_cdsp_mem(void);
+
+/**
+ * synx_global_dump_shared_memory - Prints the top entries of
+ * bitmap and table in global shared memory.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_dump_shared_memory(void);
+
+/**
+ * synx_global_fetch_handle_details - Fetches the synx handle from
+ * global shared memory.
+ *
+ * @param idx    : Global entry index whose handle is requested
+ * @param h_synx : Pointer to the handle (filled by function)
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_fetch_handle_details(u32 idx, u32 *h_synx);
+
+/* Function to fetch global shared memory entry */
+bool synx_fetch_global_shared_memory_handle_details(u32 synx_handle,
+	struct synx_global_coredata *synx_global_entry);
+
+#endif /* __SYNX_SHARED_MEM_H__ */

+ 249 - 0
qcom/opensource/synx-kernel/msm/synx/synx_private.h

@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_PRIVATE_H__
+#define __SYNX_PRIVATE_H__
+
+#include <linux/bitmap.h>
+#include <linux/cdev.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/hashtable.h>
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+#include "synx_api.h"
+#include "synx_global.h"
+
+#define SYNX_MAX_OBJS               SYNX_GLOBAL_MAX_OBJS
+
+#define SYNX_NAME                   "synx"
+#define SYNX_DEVICE_NAME            "synx_device"
+#define SYNX_WQ_CB_NAME             "hiprio_synx_cb_queue"
+#define SYNX_WQ_CB_THREADS          4
+#define SYNX_WQ_CLEANUP_NAME        "hiprio_synx_cleanup_queue"
+#define SYNX_WQ_CLEANUP_THREADS     2
+#define SYNX_MAX_NUM_BINDINGS       8
+
+#define SYNX_OBJ_HANDLE_SHIFT       SYNX_HANDLE_INDEX_BITS
+#define SYNX_OBJ_CORE_ID_SHIFT      (SYNX_OBJ_HANDLE_SHIFT+SYNX_HANDLE_CORE_BITS)
+#define SYNX_OBJ_GLOBAL_FLAG_SHIFT  (SYNX_OBJ_CORE_ID_SHIFT+SYNX_HANDLE_GLOBAL_FLAG_BIT)
+
+#define SYNX_OBJ_HANDLE_MASK        GENMASK_ULL(SYNX_OBJ_HANDLE_SHIFT-1, 0)
+#define SYNX_OBJ_CORE_ID_MASK       GENMASK_ULL(SYNX_OBJ_CORE_ID_SHIFT-1, SYNX_OBJ_HANDLE_SHIFT)
+#define SYNX_OBJ_GLOBAL_FLAG_MASK   \
+	GENMASK_ULL(SYNX_OBJ_GLOBAL_FLAG_SHIFT-1, SYNX_OBJ_CORE_ID_SHIFT)
+
+#define MAX_TIMESTAMP_SIZE          32
+#define SYNX_OBJ_NAME_LEN           64
+
+#define SYNX_PAYLOAD_WORDS          4
+
+#define SYNX_CREATE_IM_EX_RELEASE   SYNX_CREATE_MAX_FLAGS
+#define SYNX_CREATE_MERGED_FENCE    (SYNX_CREATE_MAX_FLAGS << 1)
+
+#define SYNX_MAX_REF_COUNTS         100
+
+struct synx_bind_desc {
+	struct synx_external_desc_v2 external_desc;
+	void *external_data;
+};
+
+struct error_node {
+	char timestamp[32];
+	u64 session;
+	u32 client_id;
+	u32 h_synx;
+	s32 error_code;
+	struct list_head node;
+};
+
+struct synx_entry_32 {
+	u32 key;
+	void *data;
+	struct hlist_node node;
+};
+
+struct synx_entry_64 {
+	u64 key;
+	u32 data[2];
+	struct kref refcount;
+	struct hlist_node node;
+};
+
+struct synx_map_entry {
+	struct synx_coredata *synx_obj;
+	struct kref refcount;
+	u32 flags;
+	u32 key;
+	struct work_struct dispatch;
+	struct hlist_node node;
+};
+
+struct synx_fence_entry {
+	u32 g_handle;
+	u32 l_handle;
+	u64 key;
+	struct hlist_node node;
+};
+
+struct synx_kernel_payload {
+	u32 h_synx;
+	u32 status;
+	void *data;
+	synx_user_callback_t cb_func;
+	synx_user_callback_t cancel_cb_func;
+};
+
+struct synx_cb_data {
+	struct synx_session *session;
+	u32 idx;
+	u32 h_synx;
+	u32 status;
+	struct timer_list synx_timer;
+	u64 timeout;
+	struct work_struct cb_dispatch;
+	struct list_head node;
+};
+
+struct synx_client_cb {
+	bool is_valid;
+	u32 idx;
+	struct synx_client *client;
+	struct synx_kernel_payload kernel_cb;
+	struct list_head node;
+};
+
+struct synx_registered_ops {
+	char name[SYNX_OBJ_NAME_LEN];
+	struct bind_operations ops;
+	enum synx_bind_client_type type;
+	bool valid;
+};
+
+struct synx_cleanup_cb {
+	void *data;
+	struct work_struct cb_dispatch;
+};
+
+enum synx_signal_handler {
+	SYNX_SIGNAL_FROM_CLIENT   = 0x1,
+	SYNX_SIGNAL_FROM_FENCE    = 0x2,
+	SYNX_SIGNAL_FROM_IPC      = 0x4,
+	SYNX_SIGNAL_FROM_CALLBACK = 0x8,
+};
+
+struct synx_signal_cb {
+	u32 handle;
+	u32 status;
+	u64 ext_sync_id;
+	struct synx_coredata *synx_obj;
+	enum synx_signal_handler flag;
+	struct dma_fence_cb fence_cb;
+	struct work_struct cb_dispatch;
+};
+
+struct synx_coredata {
+	char name[SYNX_OBJ_NAME_LEN];
+	struct dma_fence *fence;
+	struct mutex obj_lock;
+	struct kref refcount;
+	u32 type;
+	u32 status;
+	u32 num_bound_synxs;
+	struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS];
+	struct list_head reg_cbs_list;
+	u32 global_idx;
+	u32 map_count;
+	struct synx_signal_cb *signal_cb;
+};
+
+struct synx_client;
+struct synx_device;
+
+struct synx_handle_coredata {
+	struct synx_client *client;
+	struct synx_coredata *synx_obj;
+	void *map_entry;
+	struct kref refcount;
+	u32 key;
+	u32 rel_count;
+	struct work_struct dispatch;
+	struct hlist_node node;
+};
+
+struct synx_client {
+	u32 type;
+	bool active;
+	struct synx_device *device;
+	char name[SYNX_OBJ_NAME_LEN];
+	u64 id;
+	u64 dma_context;
+	struct kref refcount;
+	struct mutex event_q_lock;
+	struct list_head event_q;
+	wait_queue_head_t event_wq;
+	DECLARE_BITMAP(cb_bitmap, SYNX_MAX_OBJS);
+	struct synx_client_cb cb_table[SYNX_MAX_OBJS];
+	DECLARE_HASHTABLE(handle_map, 8);
+	spinlock_t handle_map_lock;
+	struct work_struct dispatch;
+	struct hlist_node node;
+};
+
+struct synx_native {
+	spinlock_t metadata_map_lock;
+	DECLARE_HASHTABLE(client_metadata_map, 8);
+	spinlock_t fence_map_lock;
+	DECLARE_HASHTABLE(fence_map, 10);
+	spinlock_t global_map_lock;
+	DECLARE_HASHTABLE(global_map, 10);
+	spinlock_t local_map_lock;
+	DECLARE_HASHTABLE(local_map, 8);
+	spinlock_t csl_map_lock;
+	DECLARE_HASHTABLE(csl_fence_map, 8);
+	DECLARE_BITMAP(bitmap, SYNX_MAX_OBJS);
+};
+
+struct synx_cdsp_ssr {
+	u64 ssrcnt;
+	void *handle;
+	struct notifier_block nb;
+};
+
+struct synx_device {
+	struct cdev cdev;
+	dev_t dev;
+	struct class *class;
+	struct synx_native *native;
+	struct workqueue_struct *wq_cb;
+	struct workqueue_struct *wq_cleanup;
+	struct mutex vtbl_lock;
+	struct synx_registered_ops bind_vtbl[SYNX_MAX_BIND_TYPES];
+	struct dentry *debugfs_root;
+	struct list_head error_list;
+	struct mutex error_lock;
+	struct synx_cdsp_ssr cdsp_ssr;
+};
+
+int synx_signal_core(struct synx_coredata *synx_obj,
+	u32 status,
+	bool cb_signal,
+	s32 ext_sync_id);
+
+int synx_ipc_callback(uint32_t client_id,
+	int64_t data, void *priv);
+
+void synx_signal_handler(struct work_struct *cb_dispatch);
+
+int synx_native_release_core(struct synx_client *session,
+	u32 h_synx);
+
+int synx_bind(struct synx_session *session,
+	u32 h_synx,
+	struct synx_external_desc_v2 external_sync);
+
+#endif /* __SYNX_PRIVATE_H__ */

+ 1685 - 0
qcom/opensource/synx-kernel/msm/synx/synx_util.c

@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#include "synx_debugfs.h"
+#include "synx_util.h"
+#include "synx_private.h"
+
+extern void synx_external_callback(s32 sync_obj, int status, void *data);
+
+int synx_util_init_coredata(struct synx_coredata *synx_obj,
+	struct synx_create_params *params,
+	struct dma_fence_ops *ops,
+	u64 dma_context)
+{
+	int rc = -SYNX_INVALID;
+	spinlock_t *fence_lock;
+	struct dma_fence *fence;
+	struct synx_fence_entry *entry;
+
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) ||
+		 IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx))
+		return -SYNX_INVALID;
+
+	if (params->flags & SYNX_CREATE_GLOBAL_FENCE &&
+		*params->h_synx != 0) {
+		rc = synx_global_get_ref(
+			synx_util_global_idx(*params->h_synx));
+		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
+	} else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) {
+		rc = synx_alloc_global_handle(params->h_synx);
+		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
+	} else {
+		rc = synx_alloc_local_handle(params->h_synx);
+	}
+
+	if (rc != SYNX_SUCCESS)
+		return rc;
+
+	synx_obj->map_count = 1;
+	synx_obj->num_bound_synxs = 0;
+	synx_obj->type |= params->flags;
+	kref_init(&synx_obj->refcount);
+	mutex_init(&synx_obj->obj_lock);
+	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
+	if (params->name)
+		strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name));
+
+	if (params->flags & SYNX_CREATE_DMA_FENCE) {
+		fence = (struct dma_fence *)params->fence;
+		if (IS_ERR_OR_NULL(fence)) {
+			dprintk(SYNX_ERR, "invalid external fence\n");
+			goto free;
+		}
+
+		dma_fence_get(fence);
+		synx_obj->fence = fence;
+	} else {
+		/*
+		 * lock and fence memory will be released in fence
+		 * release function
+		 */
+		fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(fence_lock)) {
+			rc = -SYNX_NOMEM;
+			goto free;
+		}
+
+		fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(fence)) {
+			kfree(fence_lock);
+			rc = -SYNX_NOMEM;
+			goto free;
+		}
+
+		spin_lock_init(fence_lock);
+		dma_fence_init(fence, ops, fence_lock, dma_context, 1);
+
+		synx_obj->fence = fence;
+		synx_util_activate(synx_obj);
+		dprintk(SYNX_MEM,
+			"allocated backing fence %pK\n", fence);
+
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(entry)) {
+			rc = -SYNX_NOMEM;
+			goto clean;
+		}
+
+		entry->key = (u64)fence;
+		if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
+			entry->g_handle = *params->h_synx;
+		else
+			entry->l_handle = *params->h_synx;
+
+		rc = synx_util_insert_fence_entry(entry,
+				params->h_synx,
+				params->flags & SYNX_CREATE_GLOBAL_FENCE);
+		BUG_ON(rc != SYNX_SUCCESS);
+	}
+
+	if (rc != SYNX_SUCCESS)
+		goto clean;
+
+	synx_obj->status = synx_util_get_object_status(synx_obj);
+	return SYNX_SUCCESS;
+
+clean:
+	dma_fence_put(fence);
+free:
+	if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
+		synx_global_put_ref(
+			synx_util_global_idx(*params->h_synx));
+	else
+		clear_bit(synx_util_global_idx(*params->h_synx),
+			synx_dev->native->bitmap);
+
+	return rc;
+}
+
+int synx_util_add_callback(struct synx_coredata *synx_obj,
+	u32 h_synx)
+{
+	int rc;
+	struct synx_signal_cb *signal_cb;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(signal_cb))
+		return -SYNX_NOMEM;
+
+	signal_cb->handle = h_synx;
+	signal_cb->flag = SYNX_SIGNAL_FROM_FENCE;
+	signal_cb->synx_obj = synx_obj;
+
+	/* get reference on synx coredata for signal cb */
+	synx_util_get_object(synx_obj);
+
+	/*
+	 * adding callback enables synx framework to
+	 * get notified on signal from clients using
+	 * native dma fence operations.
+	 */
+	rc = dma_fence_add_callback(synx_obj->fence,
+			&signal_cb->fence_cb, synx_fence_callback);
+	if (rc != 0) {
+		if (rc == -ENOENT) {
+			if (synx_util_is_global_object(synx_obj)) {
+				/* signal (if) global handle */
+				rc = synx_global_update_status(
+					synx_obj->global_idx,
+					synx_util_get_object_status(synx_obj));
+				if (rc != SYNX_SUCCESS)
+					dprintk(SYNX_ERR,
+						"status update of %u with fence %pK\n",
+						synx_obj->global_idx, synx_obj->fence);
+			} else {
+				rc = SYNX_SUCCESS;
+			}
+		} else {
+			dprintk(SYNX_ERR,
+				"error adding callback for %pK err %d\n",
+				synx_obj->fence, rc);
+		}
+		synx_util_put_object(synx_obj);
+		kfree(signal_cb);
+		return rc;
+	}
+
+	synx_obj->signal_cb = signal_cb;
+	dprintk(SYNX_VERB, "added callback %pK to fence %pK\n",
+		signal_cb, synx_obj->fence);
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_util_count_dma_array_fences(struct dma_fence *fence)
+{
+	struct dma_fence_cb *cur, *tmp;
+	int32_t num_dma_array = 0;
+	struct dma_fence_array_cb *cb_array = NULL;
+	struct dma_fence_array *array = NULL;
+
+	if (IS_ERR_OR_NULL(fence)) {
+		dprintk(SYNX_ERR, "invalid fence passed\n");
+		return num_dma_array;
+	}
+
+	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+		// count for parent fences
+		cb_array = container_of(cur, struct dma_fence_array_cb, cb);
+		if (IS_ERR_OR_NULL(cb_array)) {
+			dprintk(SYNX_VERB, "cb_array not found in fence %pK\n", fence);
+			continue;
+		}
+		array = cb_array->array;
+		if (!IS_ERR_OR_NULL(array) && dma_fence_is_array(&(array->base)))
+			num_dma_array++;
+	}
+
+	dprintk(SYNX_VERB, "number of fence_array found %d for child fence %pK\n",
+		num_dma_array, fence);
+
+	return num_dma_array;
+}
+
+int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
+	struct dma_fence **fences,
+	struct synx_merge_params *params,
+	u32 num_objs,
+	u64 dma_context)
+{
+	int rc;
+	struct dma_fence_array *array;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
+		rc = synx_alloc_global_handle(params->h_merged_obj);
+		synx_obj->global_idx =
+			synx_util_global_idx(*params->h_merged_obj);
+	} else {
+		rc = synx_alloc_local_handle(params->h_merged_obj);
+	}
+
+	if (rc != SYNX_SUCCESS)
+		return rc;
+
+	array = dma_fence_array_create(num_objs, fences,
+				dma_context, 1, false);
+	if (IS_ERR_OR_NULL(array))
+		return -SYNX_INVALID;
+
+	synx_obj->fence = &array->base;
+	synx_obj->map_count = 1;
+	synx_obj->type = params->flags;
+	synx_obj->type |= SYNX_CREATE_MERGED_FENCE;
+	synx_obj->num_bound_synxs = 0;
+	kref_init(&synx_obj->refcount);
+	mutex_init(&synx_obj->obj_lock);
+	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
+	synx_obj->status = synx_util_get_object_status(synx_obj);
+
+	synx_util_activate(synx_obj);
+	return rc;
+}
+
+void synx_util_destroy_coredata(struct kref *kref)
+{
+	int rc;
+	struct synx_coredata *synx_obj =
+		container_of(kref, struct synx_coredata, refcount);
+
+	if (synx_util_is_global_object(synx_obj)) {
+		rc = synx_global_clear_subscribed_core(synx_obj->global_idx, SYNX_CORE_APSS);
+		if (rc)
+			dprintk(SYNX_ERR, "Failed to clear subscribers");
+
+		synx_global_put_ref(synx_obj->global_idx);
+	}
+	synx_util_object_destroy(synx_obj);
+}
+
+void synx_util_get_object(struct synx_coredata *synx_obj)
+{
+	kref_get(&synx_obj->refcount);
+}
+
+void synx_util_put_object(struct synx_coredata *synx_obj)
+{
+	kref_put(&synx_obj->refcount, synx_util_destroy_coredata);
+}
+
+int synx_util_cleanup_merged_fence(struct synx_coredata *synx_obj, int status)
+{
+	struct dma_fence_array *array = NULL;
+	u32 i;
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence))
+		return -SYNX_INVALID;
+
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return -SYNX_INVALID;
+
+		for (i = 0; i < array->num_fences; i++) {
+			if (kref_read(&array->fences[i]->refcount) == 1 &&
+				__fence_state(array->fences[i], false) == SYNX_STATE_ACTIVE) {
+				dma_fence_set_error(array->fences[i],
+					-SYNX_STATE_SIGNALED_CANCEL);
+
+				rc = dma_fence_signal(array->fences[i]);
+				if (rc)
+					dprintk(SYNX_ERR,
+						"signaling child fence %pK failed=%d\n",
+						array->fences[i], rc);
+			}
+		}
+	}
+	return rc;
+}
+
+void synx_util_object_destroy(struct synx_coredata *synx_obj)
+{
+	int rc;
+	int num_dma_array = 0;
+	u32 i;
+	s32 sync_id;
+	u32 type;
+	unsigned long flags;
+	struct synx_cb_data *synx_cb, *synx_cb_temp;
+	struct synx_bind_desc *bind_desc;
+	struct bind_operations *bind_ops;
+	struct synx_external_data *data;
+
+	/* clear all the undispatched callbacks */
+	list_for_each_entry_safe(synx_cb,
+		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
+		dprintk(SYNX_ERR,
+			"dipatching un-released callbacks of session %pK\n",
+			synx_cb->session);
+		synx_cb->status = SYNX_STATE_SIGNALED_CANCEL;
+		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
+			dprintk(SYNX_VERB,
+				"Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
+				synx_cb, synx_cb->timeout);
+			del_timer(&synx_cb->synx_timer);
+		}
+		list_del_init(&synx_cb->node);
+		queue_work(synx_dev->wq_cb,
+			&synx_cb->cb_dispatch);
+		dprintk(SYNX_VERB, "dispatched callback for fence %pKn", synx_obj->fence);
+	}
+
+	for (i = 0; i < synx_obj->num_bound_synxs; i++) {
+		bind_desc = &synx_obj->bound_synxs[i];
+		sync_id = bind_desc->external_desc.id;
+		type = bind_desc->external_desc.type;
+		data = bind_desc->external_data;
+		bind_ops = synx_util_get_bind_ops(type);
+		if (IS_ERR_OR_NULL(bind_ops)) {
+			dprintk(SYNX_ERR,
+				"bind ops fail id: %d, type: %u, err: %d\n",
+				sync_id, type, rc);
+			continue;
+		}
+
+		/* clear the hash table entry */
+		synx_util_remove_data(&sync_id, type);
+
+		rc = bind_ops->deregister_callback(
+				synx_external_callback, data, sync_id);
+		if (rc < 0) {
+			dprintk(SYNX_ERR,
+				"de-registration fail id: %d, type: %u, err: %d\n",
+				sync_id, type, rc);
+			continue;
+		}
+
+		/*
+		 * release the memory allocated for external data.
+		 * It is safe to release this memory
+		 * only if deregistration is successful.
+		 */
+		kfree(data);
+	}
+
+	mutex_destroy(&synx_obj->obj_lock);
+	synx_util_release_fence_entry((u64)synx_obj->fence);
+
+	/*
+	 * The dma fence framework expects fences to be signaled before
+	 * release, so signal here if the handle is still active and we
+	 * hold the last refcount. Synx handles on other cores remain
+	 * active and carry out the usual callflow.
+	 */
+	if (!IS_ERR_OR_NULL(synx_obj->fence)) {
+		spin_lock_irqsave(synx_obj->fence->lock, flags);
+		if (synx_util_is_merged_object(synx_obj) &&
+			synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE)
+			rc = synx_util_cleanup_merged_fence(synx_obj, -SYNX_STATE_SIGNALED_CANCEL);
+		else if (synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE) {
+			num_dma_array = synx_util_count_dma_array_fences(synx_obj->fence);
+			if (kref_read(&synx_obj->fence->refcount) == 1 + num_dma_array) {
+				// set fence error to cancel
+				dma_fence_set_error(synx_obj->fence,
+					-SYNX_STATE_SIGNALED_CANCEL);
+
+				rc = dma_fence_signal_locked(synx_obj->fence);
+			}
+		}
+		spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+		if (rc)
+			dprintk(SYNX_ERR,
+				"signaling fence %pK failed=%d\n",
+				synx_obj->fence, rc);
+	}
+
+	dma_fence_put(synx_obj->fence);
+	kfree(synx_obj);
+	dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
+}
+
+long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size)
+{
+	bool bit;
+	long idx;
+
+	do {
+		idx = find_first_zero_bit(bitmap, size);
+		if (idx >= size)
+			break;
+		bit = test_and_set_bit(idx, bitmap);
+	} while (bit);
+
+	return idx;
+}
+
+u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx)
+{
+	u32 handle = 0;
+
+	if (idx >= SYNX_MAX_OBJS)
+		return 0;
+
+	if (global_idx) {
+		handle = 1;
+		handle <<= SYNX_HANDLE_CORE_BITS;
+	}
+
+	handle |= core_id;
+	handle <<= SYNX_HANDLE_INDEX_BITS;
+	handle |= idx;
+
+	return handle;
+}
+
+int synx_alloc_global_handle(u32 *new_synx)
+{
+	int rc;
+	u32 idx;
+
+	rc = synx_global_alloc_index(&idx);
+	if (rc != SYNX_SUCCESS)
+		return rc;
+
+	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true);
+	dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n",
+		*new_synx, *new_synx);
+
+	rc = synx_global_init_coredata(*new_synx);
+	return rc;
+}
+
+int synx_alloc_local_handle(u32 *new_synx)
+{
+	u32 idx;
+
+	idx = synx_util_get_free_handle(synx_dev->native->bitmap,
+		SYNX_MAX_OBJS);
+	if (idx >= SYNX_MAX_OBJS)
+		return -SYNX_NOMEM;
+
+	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false);
+	dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n",
+		*new_synx, *new_synx);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_util_init_handle(struct synx_client *client,
+	struct synx_coredata *synx_obj, u32 *new_h_synx,
+	void *map_entry)
+{
+	int rc = SYNX_SUCCESS;
+	bool found = false;
+	struct synx_handle_coredata *synx_data, *curr;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) ||
+		IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry))
+		return -SYNX_INVALID;
+
+	synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(synx_data))
+		return -SYNX_NOMEM;
+
+	synx_data->client = client;
+	synx_data->synx_obj = synx_obj;
+	synx_data->key = *new_h_synx;
+	synx_data->map_entry = map_entry;
+	kref_init(&synx_data->refcount);
+	synx_data->rel_count = 1;
+
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+		curr, node, *new_h_synx) {
+		if (curr->key == *new_h_synx) {
+			if (curr->synx_obj != synx_obj) {
+				rc = -SYNX_INVALID;
+				dprintk(SYNX_ERR,
+					"inconsistent data in handle map\n");
+			} else {
+				kref_get(&curr->refcount);
+				curr->rel_count++;
+			}
+			found = true;
+			break;
+		}
+	}
+	if (unlikely(found))
+		kfree(synx_data);
+	else
+		hash_add(client->handle_map,
+			&synx_data->node, *new_h_synx);
+	spin_unlock_bh(&client->handle_map_lock);
+
+	return rc;
+}
+
+int synx_util_activate(struct synx_coredata *synx_obj)
+{
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	/* move synx to ACTIVE state and register cb for merged object */
+	dma_fence_enable_sw_signaling(synx_obj->fence);
+	return 0;
+}
+
+static u32 synx_util_get_references(struct synx_coredata *synx_obj)
+{
+	u32 count = 0;
+	u32 i = 0;
+	struct dma_fence_array *array = NULL;
+
+	/* obtain dma fence reference */
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return 0;
+
+		for (i = 0; i < array->num_fences; i++)
+			dma_fence_get(array->fences[i]);
+		count = array->num_fences;
+	} else {
+		dma_fence_get(synx_obj->fence);
+		count = 1;
+	}
+
+	return count;
+}
+
+static void synx_util_put_references(struct synx_coredata *synx_obj)
+{
+	u32 i = 0;
+	struct dma_fence_array *array = NULL;
+
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return;
+
+		for (i = 0; i < array->num_fences; i++)
+			dma_fence_put(array->fences[i]);
+	} else {
+		dma_fence_put(synx_obj->fence);
+	}
+}
+
+static u32 synx_util_add_fence(struct synx_coredata *synx_obj,
+	struct dma_fence **fences,
+	u32 idx)
+{
+	struct dma_fence_array *array = NULL;
+	u32 i = 0;
+
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return 0;
+
+		for (i = 0; i < array->num_fences; i++)
+			fences[idx+i] = array->fences[i];
+
+		return array->num_fences;
+	}
+
+	fences[idx] = synx_obj->fence;
+	return 1;
+}
+
+static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num)
+{
+	int i, j;
+	u32 wr_idx = 1;
+
+	if (IS_ERR_OR_NULL(arr)) {
+		dprintk(SYNX_ERR, "invalid input array\n");
+		return 0;
+	}
+
+	for (i = 1; i < num; i++) {
+		for (j = 0; j < wr_idx ; j++) {
+			if (arr[i] == arr[j]) {
+				/* release reference obtained for duplicate */
+				dprintk(SYNX_DBG,
+					"releasing duplicate reference\n");
+				dma_fence_put(arr[i]);
+				break;
+			}
+		}
+		if (j == wr_idx)
+			arr[wr_idx++] = arr[i];
+	}
+
+	return wr_idx;
+}
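+
+/*
+ * Dedup sketch: for an input array of {A, B, A, C} the loop above
+ * compacts it to {A, B, C}, returns 3, and drops the extra fence
+ * reference taken on the duplicate A during merge validation.
+ */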
+
+s32 synx_util_merge_error(struct synx_client *client,
+	u32 *h_synxs,
+	u32 num_objs)
+{
+	u32 i = 0;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs))
+		return -SYNX_INVALID;
+
+	for (i = 0; i < num_objs; i++) {
+		synx_data = synx_util_acquire_handle(client, h_synxs[i]);
+		synx_obj = synx_util_obtain_object(synx_data);
+		if (IS_ERR_OR_NULL(synx_obj) ||
+			IS_ERR_OR_NULL(synx_obj->fence)) {
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid handle %d in cleanup\n",
+				client->id, h_synxs[i]);
+			continue;
+		}
+		/* release all references obtained during merge validation */
+		synx_util_put_references(synx_obj);
+		synx_util_release_handle(synx_data);
+	}
+
+	return 0;
+}
+
+int synx_util_validate_merge(struct synx_client *client,
+	u32 *h_synxs,
+	u32 num_objs,
+	struct dma_fence ***fence_list,
+	u32 *fence_cnt)
+{
+	u32 count = 0;
+	u32 i = 0;
+	struct synx_handle_coredata **synx_datas;
+	struct synx_coredata **synx_objs;
+	struct dma_fence **fences = NULL;
+
+	if (num_objs <= 1) {
+		dprintk(SYNX_ERR, "single handle merge is not allowed\n");
+		return -SYNX_INVALID;
+	}
+
+	synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_datas))
+		return -SYNX_NOMEM;
+
+	synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_objs)) {
+		kfree(synx_datas);
+		return -SYNX_NOMEM;
+	}
+
+	for (i = 0; i < num_objs; i++) {
+		synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]);
+		synx_objs[i] = synx_util_obtain_object(synx_datas[i]);
+		if (IS_ERR_OR_NULL(synx_objs[i]) ||
+			IS_ERR_OR_NULL(synx_objs[i]->fence)) {
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid handle %d in merge list\n",
+				client->id, h_synxs[i]);
+			*fence_cnt = i;
+			goto error;
+		}
+		count += synx_util_get_references(synx_objs[i]);
+	}
+
+	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(fences)) {
+		*fence_cnt = num_objs;
+		goto error;
+	}
+
+	/* memory will be released later in the invoking function */
+	*fence_list = fences;
+	count = 0;
+
+	for (i = 0; i < num_objs; i++) {
+		count += synx_util_add_fence(synx_objs[i], fences, count);
+		/* release the reference obtained earlier in the function */
+		synx_util_release_handle(synx_datas[i]);
+	}
+
+	*fence_cnt = synx_util_remove_duplicates(fences, count);
+	kfree(synx_objs);
+	kfree(synx_datas);
+	return 0;
+
+error:
+	/* release the reference/s obtained earlier in the function */
+	for (i = 0; i < *fence_cnt; i++) {
+		synx_util_put_references(synx_objs[i]);
+		synx_util_release_handle(synx_datas[i]);
+	}
+	*fence_cnt = 0;
+	kfree(synx_objs);
+	kfree(synx_datas);
+	return -SYNX_INVALID;
+}
+
+u32 __fence_state(struct dma_fence *fence, bool locked)
+{
+	s32 status;
+	u32 state = SYNX_STATE_INVALID;
+
+	if (IS_ERR_OR_NULL(fence)) {
+		dprintk(SYNX_ERR, "invalid fence\n");
+		return SYNX_STATE_INVALID;
+	}
+
+	if (locked)
+		status = dma_fence_get_status_locked(fence);
+	else
+		status = dma_fence_get_status(fence);
+
+	/* convert fence status to synx state */
+	switch (status) {
+	case 0:
+		state = SYNX_STATE_ACTIVE;
+		break;
+	case 1:
+		state = SYNX_STATE_SIGNALED_SUCCESS;
+		break;
+	case -SYNX_STATE_SIGNALED_CANCEL:
+		state = SYNX_STATE_SIGNALED_CANCEL;
+		break;
+	case -SYNX_STATE_SIGNALED_EXTERNAL:
+		state = SYNX_STATE_SIGNALED_EXTERNAL;
+		break;
+	case -SYNX_STATE_SIGNALED_ERROR:
+		state = SYNX_STATE_SIGNALED_ERROR;
+		break;
+	default:
+		state = (u32)(-status);
+	}
+
+	return state;
+}
+
+static u32 __fence_group_state(struct dma_fence *fence, bool locked)
+{
+	u32 i = 0;
+	u32 state = SYNX_STATE_INVALID, parent_state = SYNX_STATE_INVALID;
+	struct dma_fence_array *array = NULL;
+	u32 intr, actv_cnt, sig_cnt, err_cnt;
+
+	if (IS_ERR_OR_NULL(fence)) {
+		dprintk(SYNX_ERR, "invalid fence\n");
+		return SYNX_STATE_INVALID;
+	}
+
+	actv_cnt = sig_cnt = err_cnt = 0;
+	array = to_dma_fence_array(fence);
+	if (IS_ERR_OR_NULL(array))
+		return SYNX_STATE_INVALID;
+
+	for (i = 0; i < array->num_fences; i++) {
+		intr = __fence_state(array->fences[i], locked);
+		if (err_cnt == 0)
+			parent_state = intr;
+		switch (intr) {
+		case SYNX_STATE_ACTIVE:
+			actv_cnt++;
+			break;
+		case SYNX_STATE_SIGNALED_SUCCESS:
+			sig_cnt++;
+			break;
+		default:
+			if (intr > SYNX_STATE_SIGNALED_MAX)
+				sig_cnt++;
+			else
+				err_cnt++;
+		}
+	}
+
+	dprintk(SYNX_DBG,
+		"group cnt stats act:%u, sig: %u, err: %u\n",
+		actv_cnt, sig_cnt, err_cnt);
+
+	if (actv_cnt)
+		state = SYNX_STATE_ACTIVE;
+	else
+		state = parent_state;
+
+	return state;
+}
+
+/*
+ * WARN: Must not hold the fence spinlock when invoking this function.
+ * Use synx_util_get_object_status_locked instead when holding the lock.
+ */
+u32 synx_util_get_object_status(struct synx_coredata *synx_obj)
+{
+	u32 state;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return SYNX_STATE_INVALID;
+
+	if (synx_util_is_merged_object(synx_obj))
+		state = __fence_group_state(synx_obj->fence, false);
+	else
+		state = __fence_state(synx_obj->fence, false);
+
+	return state;
+}
+
+/* use this for status check when holding on to metadata spinlock */
+u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj)
+{
+	u32 state;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return SYNX_STATE_INVALID;
+
+	if (synx_util_is_merged_object(synx_obj))
+		state = __fence_group_state(synx_obj->fence, true);
+	else
+		state = __fence_state(synx_obj->fence, true);
+
+	return state;
+}
+
+struct synx_handle_coredata *synx_util_acquire_handle(
+	struct synx_client *client, u32 h_synx)
+{
+	struct synx_handle_coredata *synx_data = NULL;
+	struct synx_handle_coredata *synx_handle =
+		ERR_PTR(-SYNX_NOENT);
+
+	if (IS_ERR_OR_NULL(client))
+		return ERR_PTR(-SYNX_INVALID);
+
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+		synx_data, node, h_synx) {
+		if (synx_data->key == h_synx &&
+			synx_data->rel_count != 0) {
+			kref_get(&synx_data->refcount);
+			synx_handle = synx_data;
+			break;
+		}
+	}
+	spin_unlock_bh(&client->handle_map_lock);
+
+	return synx_handle;
+}
+
+struct synx_map_entry *synx_util_insert_to_map(
+	struct synx_coredata *synx_obj,
+	u32 h_synx, u32 flags)
+{
+	struct synx_map_entry *map_entry;
+
+	map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(map_entry))
+		return ERR_PTR(-SYNX_NOMEM);
+
+	kref_init(&map_entry->refcount);
+	map_entry->synx_obj = synx_obj;
+	map_entry->flags = flags;
+	map_entry->key = h_synx;
+
+	if (synx_util_is_global_handle(h_synx)) {
+		spin_lock_bh(&synx_dev->native->global_map_lock);
+		hash_add(synx_dev->native->global_map,
+			&map_entry->node, h_synx);
+		spin_unlock_bh(&synx_dev->native->global_map_lock);
+		dprintk(SYNX_MEM,
+			"added handle %u to global map %pK\n",
+			h_synx, map_entry);
+	} else {
+		spin_lock_bh(&synx_dev->native->local_map_lock);
+		hash_add(synx_dev->native->local_map,
+			&map_entry->node, h_synx);
+		spin_unlock_bh(&synx_dev->native->local_map_lock);
+		dprintk(SYNX_MEM,
+			"added handle %u to local map %pK\n",
+			h_synx, map_entry);
+	}
+
+	return map_entry;
+}
+
+struct synx_map_entry *synx_util_get_map_entry(u32 h_synx)
+{
+	struct synx_map_entry *curr;
+	struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT);
+
+	if (h_synx == 0)
+		return ERR_PTR(-SYNX_INVALID);
+
+	if (synx_util_is_global_handle(h_synx)) {
+		spin_lock_bh(&synx_dev->native->global_map_lock);
+		hash_for_each_possible(synx_dev->native->global_map,
+			curr, node, h_synx) {
+			if (curr->key == h_synx) {
+				kref_get(&curr->refcount);
+				map_entry = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->global_map_lock);
+	} else {
+		spin_lock_bh(&synx_dev->native->local_map_lock);
+		hash_for_each_possible(synx_dev->native->local_map,
+			curr, node, h_synx) {
+			if (curr->key == h_synx) {
+				kref_get(&curr->refcount);
+				map_entry = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->local_map_lock);
+	}
+
+	/* TODO: decide whether to allocate an entry when lookup fails */
+	return map_entry;
+}
+
+static void synx_util_cleanup_fence(
+	struct synx_coredata *synx_obj)
+{
+	struct synx_signal_cb *signal_cb;
+	unsigned long flags;
+	u32 g_status;
+	u32 f_status;
+	u32 h_synx = 0;
+
+	mutex_lock(&synx_obj->obj_lock);
+	synx_obj->map_count--;
+	signal_cb = synx_obj->signal_cb;
+	f_status = synx_util_get_object_status(synx_obj);
+	dprintk(SYNX_VERB, "f_status:%u, signal_cb:%pK, map:%u, idx:%u\n",
+		f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx);
+	if (synx_obj->map_count == 0 &&
+		(signal_cb != NULL) &&
+		(synx_obj->global_idx != 0) &&
+		(f_status == SYNX_STATE_ACTIVE)) {
+		/*
+		 * no more clients interested in notification
+		 * on the handle on the local core.
+		 * remove the reference held by the callback on the synx
+		 * coredata structure and update the cb (if still
+		 * un-signaled) with the global handle idx to
+		 * notify any cross-core clients waiting on
+		 * the handle.
+		 */
+		g_status = synx_global_get_status(synx_obj->global_idx);
+		if (g_status > SYNX_STATE_ACTIVE) {
+			dprintk(SYNX_DBG, "signaling fence %pK with status %u\n",
+				synx_obj->fence, g_status);
+			synx_native_signal_fence(synx_obj, g_status);
+		} else {
+			spin_lock_irqsave(synx_obj->fence->lock, flags);
+			if (synx_util_get_object_status_locked(synx_obj) ==
+				SYNX_STATE_ACTIVE) {
+				signal_cb->synx_obj = NULL;
+				synx_global_fetch_handle_details(synx_obj->global_idx, &h_synx);
+				signal_cb->handle = h_synx;
+				synx_obj->signal_cb = NULL;
+				/*
+				 * release reference held by signal cb and
+				 * get reference on global index instead.
+				 */
+				synx_util_put_object(synx_obj);
+				synx_global_get_ref(synx_obj->global_idx);
+			}
+			spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+		}
+	} else if (synx_obj->map_count == 0 && signal_cb &&
+		(f_status == SYNX_STATE_ACTIVE)) {
+		if (dma_fence_remove_callback(synx_obj->fence,
+			&signal_cb->fence_cb)) {
+			kfree(signal_cb);
+			synx_obj->signal_cb = NULL;
+			/*
+			 * release reference held by signal cb and
+			 * get reference on global index instead.
+			 */
+			synx_util_put_object(synx_obj);
+			dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
+				signal_cb);
+		}
+	}
+	mutex_unlock(&synx_obj->obj_lock);
+}
+
+static void synx_util_destroy_map_entry_worker(
+	struct work_struct *dispatch)
+{
+	struct synx_map_entry *map_entry =
+		container_of(dispatch, struct synx_map_entry, dispatch);
+	struct synx_coredata *synx_obj;
+
+	synx_obj = map_entry->synx_obj;
+	if (!IS_ERR_OR_NULL(synx_obj)) {
+		synx_util_cleanup_fence(synx_obj);
+		/* release reference held by map entry */
+		synx_util_put_object(synx_obj);
+	}
+
+	if (!synx_util_is_global_handle(map_entry->key))
+		clear_bit(synx_util_global_idx(map_entry->key),
+			synx_dev->native->bitmap);
+	dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n",
+		map_entry->key, map_entry);
+	kfree(map_entry);
+}
+
+void synx_util_destroy_map_entry(struct kref *kref)
+{
+	struct synx_map_entry *map_entry =
+		container_of(kref, struct synx_map_entry, refcount);
+
+	hash_del(&map_entry->node);
+	dprintk(SYNX_MEM, "map entry for %u removed %pK\n",
+		map_entry->key, map_entry);
+	INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker);
+	queue_work(synx_dev->wq_cleanup, &map_entry->dispatch);
+}
+
+void synx_util_release_map_entry(struct synx_map_entry *map_entry)
+{
+	spinlock_t *lock;
+
+	if (IS_ERR_OR_NULL(map_entry))
+		return;
+
+	if (synx_util_is_global_handle(map_entry->key))
+		lock = &synx_dev->native->global_map_lock;
+	else
+		lock = &synx_dev->native->local_map_lock;
+
+	spin_lock_bh(lock);
+	kref_put(&map_entry->refcount,
+		synx_util_destroy_map_entry);
+	spin_unlock_bh(lock);
+}
+
+static void synx_util_destroy_handle_worker(
+	struct work_struct *dispatch)
+{
+	struct synx_handle_coredata *synx_data =
+		container_of(dispatch, struct synx_handle_coredata,
+		dispatch);
+
+	synx_util_release_map_entry(synx_data->map_entry);
+	dprintk(SYNX_VERB, "handle %u destroyed %pK\n",
+		synx_data->key, synx_data);
+	kfree(synx_data);
+}
+
+void synx_util_destroy_handle(struct kref *kref)
+{
+	struct synx_handle_coredata *synx_data =
+		container_of(kref, struct synx_handle_coredata,
+		refcount);
+
+	hash_del(&synx_data->node);
+	dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n",
+		synx_data->client->id, synx_data->key, synx_data);
+	INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker);
+	queue_work(synx_dev->wq_cleanup, &synx_data->dispatch);
+}
+
+void synx_util_release_handle(struct synx_handle_coredata *synx_data)
+{
+	struct synx_client *client;
+
+	if (IS_ERR_OR_NULL(synx_data))
+		return;
+
+	client = synx_data->client;
+	if (IS_ERR_OR_NULL(client))
+		return;
+
+	spin_lock_bh(&client->handle_map_lock);
+	kref_put(&synx_data->refcount,
+		synx_util_destroy_handle);
+	spin_unlock_bh(&client->handle_map_lock);
+}
+
+struct bind_operations *synx_util_get_bind_ops(u32 type)
+{
+	struct synx_registered_ops *client_ops;
+
+	if (!synx_util_is_valid_bind_type(type))
+		return NULL;
+
+	mutex_lock(&synx_dev->vtbl_lock);
+	client_ops = &synx_dev->bind_vtbl[type];
+	if (!client_ops->valid) {
+		mutex_unlock(&synx_dev->vtbl_lock);
+		return NULL;
+	}
+	mutex_unlock(&synx_dev->vtbl_lock);
+
+	return &client_ops->ops;
+}
+
+int synx_util_alloc_cb_entry(struct synx_client *client,
+	struct synx_kernel_payload *data,
+	u32 *cb_idx)
+{
+	long idx;
+	struct synx_client_cb *cb;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) ||
+		IS_ERR_OR_NULL(cb_idx))
+		return -SYNX_INVALID;
+
+	idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
+	if (idx >= SYNX_MAX_OBJS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] free cb index not available\n",
+			client->id);
+		return -SYNX_NOMEM;
+	}
+
+	cb = &client->cb_table[idx];
+	memset(cb, 0, sizeof(*cb));
+	cb->is_valid = true;
+	cb->client = client;
+	cb->idx = idx;
+	memcpy(&cb->kernel_cb, data,
+		sizeof(cb->kernel_cb));
+
+	*cb_idx = idx;
+	dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n",
+		client->id, *cb_idx);
+	return 0;
+}
+
+int synx_util_clear_cb_entry(struct synx_client *client,
+	struct synx_client_cb *cb)
+{
+	int rc = 0;
+	u32 idx;
+
+	if (IS_ERR_OR_NULL(cb))
+		return -SYNX_INVALID;
+
+	idx = cb->idx;
+	memset(cb, 0, sizeof(*cb));
+	if (idx && idx < SYNX_MAX_OBJS) {
+		clear_bit(idx, client->cb_bitmap);
+	} else {
+		dprintk(SYNX_ERR, "invalid index\n");
+		rc = -SYNX_INVALID;
+	}
+
+	return rc;
+}
+
+void synx_util_default_user_callback(u32 h_synx,
+	int status, void *data)
+{
+	struct synx_client_cb *cb = data;
+	struct synx_client *client = NULL;
+
+	if (cb && cb->client) {
+		client = cb->client;
+		dprintk(SYNX_VERB,
+			"[sess :%llu] user cb queued for handle %d\n",
+			client->id, h_synx);
+		cb->kernel_cb.status = status;
+		mutex_lock(&client->event_q_lock);
+		list_add_tail(&cb->node, &client->event_q);
+		mutex_unlock(&client->event_q_lock);
+		wake_up_all(&client->event_wq);
+	} else {
+		dprintk(SYNX_ERR, "invalid params\n");
+	}
+}
+
+void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
+{
+	struct synx_cb_data *synx_cb, *synx_cb_temp;
+
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR, "invalid arguments\n");
+		return;
+	}
+
+	list_for_each_entry_safe(synx_cb,
+		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
+		synx_cb->status = status;
+		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
+			dprintk(SYNX_VERB,
+				"Deleting timer synx_cb %pK, timeout 0x%llx\n",
+				synx_cb, synx_cb->timeout);
+			del_timer(&synx_cb->synx_timer);
+		}
+		list_del_init(&synx_cb->node);
+		queue_work(synx_dev->wq_cb,
+			&synx_cb->cb_dispatch);
+		dprintk(SYNX_VERB, "dispatched callback\n");
+	}
+}
+
+void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
+{
+	struct synx_cb_data *synx_cb =
+		container_of(cb_dispatch, struct synx_cb_data, cb_dispatch);
+	struct synx_client *client;
+	struct synx_client_cb *cb;
+	struct synx_kernel_payload payload;
+	u32 status;
+
+	client = synx_get_client(synx_cb->session);
+	if (IS_ERR_OR_NULL(client)) {
+		dprintk(SYNX_ERR,
+			"invalid session data %pK in cb payload\n",
+			synx_cb->session);
+		goto free;
+	}
+
+	if (synx_cb->idx == 0 ||
+		synx_cb->idx >= SYNX_MAX_OBJS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid cb index %u\n",
+			client->id, synx_cb->idx);
+		goto fail;
+	}
+
+	status = synx_cb->status;
+	cb = &client->cb_table[synx_cb->idx];
+	if (!cb->is_valid) {
+		dprintk(SYNX_ERR, "invalid cb payload\n");
+		goto fail;
+	}
+
+	memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb));
+	payload.status = status;
+
+	if (payload.cb_func == synx_util_default_user_callback) {
+		/*
+		 * need to send client cb data for default
+		 * user cb (userspace cb)
+		 */
+		payload.data = cb;
+	} else {
+		/*
+		 * clear the cb entry. userspace cb entry
+		 * will be cleared after data read by the
+		 * polling thread or when client is destroyed
+		 */
+		if (synx_util_clear_cb_entry(client, cb))
+			dprintk(SYNX_ERR,
+				"[sess :%llu] error clearing cb entry\n",
+				client->id);
+	}
+
+	dprintk(SYNX_DBG,
+		"callback dispatched for handle %u, status %u, data %pK\n",
+		payload.h_synx, payload.status, payload.data);
+
+	/* dispatch kernel callback */
+	payload.cb_func(payload.h_synx,
+		payload.status, payload.data);
+
+fail:
+	synx_put_client(client);
+free:
+	kfree(synx_cb);
+}
+
+int synx_get_child_coredata(struct synx_coredata *synx_obj,
+	struct synx_coredata ***child_synx_obj, int *num_fences)
+{
+	int rc = SYNX_SUCCESS;
+	int i = 0, handle_count = 0;
+	u32 h_child = 0;
+	struct dma_fence_array *array = NULL;
+	struct synx_coredata **synx_datas = NULL;
+	struct synx_map_entry *fence_entry = NULL;
+
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(child_synx_obj) ||
+		IS_ERR_OR_NULL(num_fences))
+		return -SYNX_INVALID;
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return -SYNX_INVALID;
+		synx_datas = kcalloc(array->num_fences, sizeof(*synx_datas), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(synx_datas))
+			return -SYNX_NOMEM;
+
+		for (i = 0; i < array->num_fences; i++) {
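+			/* the fence map is keyed on the dma_fence pointer value */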
+			h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
+			fence_entry = synx_util_get_map_entry(h_child);
+			if (IS_ERR_OR_NULL(fence_entry) ||
+				IS_ERR_OR_NULL(fence_entry->synx_obj)) {
+				dprintk(SYNX_ERR, "Invalid handle access %u\n", h_child);
+				rc = -SYNX_NOENT;
+				goto fail;
+			}
+
+			synx_datas[handle_count++] = fence_entry->synx_obj;
+			synx_util_release_map_entry(fence_entry);
+		}
+	}
+
+	*child_synx_obj = synx_datas;
+	*num_fences = handle_count;
+	return rc;
+fail:
+	kfree(synx_datas);
+	return rc;
+}
+
+u32 synx_util_get_fence_entry(u64 key, u32 global)
+{
+	u32 h_synx = 0;
+	struct synx_fence_entry *curr;
+
+	spin_lock_bh(&synx_dev->native->fence_map_lock);
+	hash_for_each_possible(synx_dev->native->fence_map,
+		curr, node, key) {
+		if (curr->key == key) {
+			if (global)
+				h_synx = curr->g_handle;
+			/* return local handle if global not available */
+			if (h_synx == 0)
+				h_synx = curr->l_handle;
+
+			break;
+		}
+	}
+	spin_unlock_bh(&synx_dev->native->fence_map_lock);
+
+	return h_synx;
+}
+
+void synx_util_release_fence_entry(u64 key)
+{
+	struct synx_fence_entry *entry = NULL, *curr;
+
+	spin_lock_bh(&synx_dev->native->fence_map_lock);
+	hash_for_each_possible(synx_dev->native->fence_map,
+		curr, node, key) {
+		if (curr->key == key) {
+			entry = curr;
+			break;
+		}
+	}
+
+	if (entry) {
+		hash_del(&entry->node);
+		dprintk(SYNX_MEM,
+			"released fence entry %pK for fence %pK\n",
+			entry, (void *)key);
+		kfree(entry);
+	}
+
+	spin_unlock_bh(&synx_dev->native->fence_map_lock);
+}
+
+int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
+	u32 *h_synx, u32 global)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_fence_entry *curr;
+
+	if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx))
+		return -SYNX_INVALID;
+
+	spin_lock_bh(&synx_dev->native->fence_map_lock);
+	hash_for_each_possible(synx_dev->native->fence_map,
+		curr, node, entry->key) {
+		/* raced with import from another process on same fence */
+		if (curr->key == entry->key) {
+			if (global)
+				*h_synx = curr->g_handle;
+
+			if (*h_synx == 0 || !global)
+				*h_synx = curr->l_handle;
+
+			rc = -SYNX_ALREADY;
+			break;
+		}
+	}
+	/* add entry only if it's not present in the map */
+	if (rc == SYNX_SUCCESS) {
+		hash_add(synx_dev->native->fence_map,
+			&entry->node, entry->key);
+		dprintk(SYNX_MEM,
+			"added fence entry %pK for fence %pK\n",
+			entry, (void *)entry->key);
+	}
+	spin_unlock_bh(&synx_dev->native->fence_map_lock);
+
+	return rc;
+}
+
+struct synx_client *synx_get_client(struct synx_session *session)
+{
+	struct synx_client *client = NULL;
+	struct synx_client *curr;
+
+	if (IS_ERR_OR_NULL(session))
+		return ERR_PTR(-SYNX_INVALID);
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	hash_for_each_possible(synx_dev->native->client_metadata_map,
+		curr, node, (u64)session) {
+		if (curr == (struct synx_client *)session) {
+			if (curr->active) {
+				kref_get(&curr->refcount);
+				client = curr;
+			}
+			break;
+		}
+	}
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+
+	return client;
+}
+
+static void synx_client_cleanup(struct work_struct *dispatch)
+{
+	int i, j;
+	struct synx_client *client =
+		container_of(dispatch, struct synx_client, dispatch);
+	struct synx_handle_coredata *curr;
+	struct hlist_node *tmp;
+
+	dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n",
+		client->id, client->name);
+	/*
+	 * go over all the remaining synx obj handles
+	 * un-released from this session and remove them.
+	 */
+	hash_for_each_safe(client->handle_map, i, tmp, curr, node) {
+		dprintk(SYNX_WARN,
+			"[sess :%llu] un-released handle %u\n",
+			client->id, curr->key);
+		j = kref_read(&curr->refcount);
+		/* release pending reference */
+		while (j--)
+			kref_put(&curr->refcount, synx_util_destroy_handle);
+	}
+
+	mutex_destroy(&client->event_q_lock);
+
+	dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n",
+		client->id, client->name, client);
+	vfree(client);
+}
+
+void synx_client_destroy(struct kref *kref)
+{
+	struct synx_client *client =
+		container_of(kref, struct synx_client, refcount);
+
+	hash_del(&client->node);
+
+	INIT_WORK(&client->dispatch, synx_client_cleanup);
+	queue_work(synx_dev->wq_cleanup, &client->dispatch);
+}
+
+void synx_put_client(struct synx_client *client)
+{
+	if (IS_ERR_OR_NULL(client))
+		return;
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	kref_put(&client->refcount, synx_client_destroy);
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+}
+
+void synx_util_generate_timestamp(char *timestamp, size_t size)
+{
+	struct timespec64 tv;
+	struct tm tm;
+
+	ktime_get_real_ts64(&tv);
+	time64_to_tm(tv.tv_sec, 0, &tm);
+	snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d",
+		tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
+		tm.tm_min, tm.tm_sec);
+}
+
+void synx_util_log_error(u32 client_id, u32 h_synx, s32 err)
+{
+	struct error_node *err_node;
+
+	if (!synx_dev->debugfs_root)
+		return;
+
+	err_node = kzalloc(sizeof(*err_node), GFP_KERNEL);
+	if (!err_node)
+		return;
+
+	err_node->client_id = client_id;
+	err_node->error_code = err;
+	err_node->h_synx = h_synx;
+	synx_util_generate_timestamp(err_node->timestamp,
+		sizeof(err_node->timestamp));
+	mutex_lock(&synx_dev->error_lock);
+	list_add(&err_node->node,
+		&synx_dev->error_list);
+	mutex_unlock(&synx_dev->error_lock);
+}
+
+int synx_util_save_data(void *fence, u32 flags,
+	u32 h_synx)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_entry_64 *entry, *curr;
+	u64 key;
+	u32 tbl = synx_util_map_params_to_type(flags);
+
+	switch (tbl) {
+	case SYNX_TYPE_CSL:
+		key = *(u32 *)fence;
+		spin_lock_bh(&synx_dev->native->csl_map_lock);
+		/* ensure fence is not already added to map */
+		hash_for_each_possible(synx_dev->native->csl_fence_map,
+			curr, node, key) {
+			if (curr->key == key) {
+				rc = -SYNX_ALREADY;
+				break;
+			}
+		}
+		if (rc == SYNX_SUCCESS) {
+			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+			if (entry) {
+				entry->data[0] = h_synx;
+				entry->key = key;
+				kref_init(&entry->refcount);
+				hash_add(synx_dev->native->csl_fence_map,
+					&entry->node, entry->key);
+				dprintk(SYNX_MEM, "added csl fence %llu to map %pK\n",
+					entry->key, entry);
+			} else {
+				rc = -SYNX_NOMEM;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->csl_map_lock);
+		break;
+	default:
+		dprintk(SYNX_ERR, "invalid hash table selection\n");
+		rc = -SYNX_INVALID;
+	}
+
+	return rc;
+}
+
+struct synx_entry_64 *synx_util_retrieve_data(void *fence,
+	u32 type)
+{
+	u64 key;
+	struct synx_entry_64 *entry = NULL;
+	struct synx_entry_64 *curr;
+
+	switch (type) {
+	case SYNX_TYPE_CSL:
+		key = *(u32 *)fence;
+		spin_lock_bh(&synx_dev->native->csl_map_lock);
+		hash_for_each_possible(synx_dev->native->csl_fence_map,
+			curr, node, key) {
+			if (curr->key == key) {
+				kref_get(&curr->refcount);
+				entry = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->csl_map_lock);
+		break;
+	default:
+		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
+			type);
+	}
+
+	return entry;
+}
+
+void synx_util_destroy_data(struct kref *kref)
+{
+	struct synx_entry_64 *entry =
+		container_of(kref, struct synx_entry_64, refcount);
+
+	hash_del(&entry->node);
+	dprintk(SYNX_MEM, "released fence %llu entry %pK\n",
+		entry->key, entry);
+	kfree(entry);
+}
+
+void synx_util_remove_data(void *fence,
+	u32 type)
+{
+	u64 key;
+	struct synx_entry_64 *entry = NULL;
+	struct synx_entry_64 *curr;
+
+	if (IS_ERR_OR_NULL(fence))
+		return;
+
+	switch (type) {
+	case SYNX_TYPE_CSL:
+		key = *((u32 *)fence);
+		spin_lock_bh(&synx_dev->native->csl_map_lock);
+		hash_for_each_possible(synx_dev->native->csl_fence_map,
+			curr, node, key) {
+			if (curr->key == key) {
+				entry = curr;
+				break;
+			}
+		}
+		if (entry)
+			kref_put(&entry->refcount, synx_util_destroy_data);
+		spin_unlock_bh(&synx_dev->native->csl_map_lock);
+		break;
+	default:
+		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
+			type);
+	}
+}
+
+void synx_util_map_import_params_to_create(
+	struct synx_import_indv_params *params,
+	struct synx_create_params *c_params)
+{
+	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params))
+		return;
+
+	if (params->flags & SYNX_IMPORT_GLOBAL_FENCE)
+		c_params->flags |= SYNX_CREATE_GLOBAL_FENCE;
+
+	if (params->flags & SYNX_IMPORT_LOCAL_FENCE)
+		c_params->flags |= SYNX_CREATE_LOCAL_FENCE;
+
+	if (params->flags & SYNX_IMPORT_DMA_FENCE)
+		c_params->flags |= SYNX_CREATE_DMA_FENCE;
+}
+
+u32 synx_util_map_client_id_to_core(
+	enum synx_client_id id)
+{
+	u32 core_id;
+
+	switch (id) {
+	case SYNX_CLIENT_NATIVE:
+		core_id = SYNX_CORE_APSS;
+		break;
+	case SYNX_CLIENT_ICP_CTX0:
+		core_id = SYNX_CORE_ICP;
+		break;
+	case SYNX_CLIENT_EVA_CTX0:
+		core_id = SYNX_CORE_EVA;
+		break;
+	case SYNX_CLIENT_VID_CTX0:
+		core_id = SYNX_CORE_IRIS;
+		break;
+	case SYNX_CLIENT_NSP_CTX0:
+		core_id = SYNX_CORE_NSP;
+		break;
+	default:
+		core_id = SYNX_CORE_MAX;
+	}
+
+	return core_id;
+}

+ 188 - 0
qcom/opensource/synx-kernel/msm/synx/synx_util.h

@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_UTIL_H__
+#define __SYNX_UTIL_H__
+
+#include "synx_api.h"
+#include "synx_private.h"
+
+extern struct synx_device *synx_dev;
+u32 __fence_state(struct dma_fence *fence, bool locked);
+void synx_util_destroy_coredata(struct kref *kref);
+extern void synx_fence_callback(struct dma_fence *fence,
+	struct dma_fence_cb *cb);
+extern int synx_native_signal_fence(struct synx_coredata *synx_obj,
+	u32 status);
+
+static inline bool synx_util_is_valid_bind_type(u32 type)
+{
+	if (type < SYNX_MAX_BIND_TYPES)
+		return true;
+
+	return false;
+}
+
+static inline bool synx_util_is_global_handle(u32 h_synx)
+{
+	return (h_synx & SYNX_OBJ_GLOBAL_FLAG_MASK) ? true : false;
+}
+
+static inline u32 synx_util_get_object_type(
+	struct synx_coredata *synx_obj)
+{
+	return synx_obj ? synx_obj->type : 0;
+}
+
+static inline bool synx_util_is_merged_object(
+	struct synx_coredata *synx_obj)
+{
+	if (synx_obj &&
+		(synx_obj->type & SYNX_CREATE_MERGED_FENCE))
+		return true;
+
+	return false;
+}
+
+static inline bool synx_util_is_global_object(
+	struct synx_coredata *synx_obj)
+{
+	if (synx_obj &&
+		(synx_obj->type & SYNX_CREATE_GLOBAL_FENCE))
+		return true;
+
+	return false;
+}
+
+static inline bool synx_util_is_external_object(
+	struct synx_coredata *synx_obj)
+{
+	if (synx_obj &&
+		!(synx_obj->type & SYNX_CREATE_MERGED_FENCE) &&
+		(synx_obj->type & SYNX_CREATE_DMA_FENCE))
+		return true;
+
+	return false;
+}
+
+static inline u32 synx_util_map_params_to_type(u32 flags)
+{
+	if (flags & SYNX_CREATE_CSL_FENCE)
+		return SYNX_TYPE_CSL;
+
+	return SYNX_MAX_BIND_TYPES;
+}
+
+static inline u32 synx_util_global_idx(u32 h_synx)
+{
+	return (h_synx & SYNX_OBJ_HANDLE_MASK);
+}
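+
+/*
+ * Example (illustrative): a handle encodes a global/local flag
+ * (SYNX_OBJ_GLOBAL_FLAG_MASK) alongside its index bits
+ * (SYNX_OBJ_HANDLE_MASK). synx_util_is_global_handle() selects which
+ * map to search and synx_util_global_idx() extracts the index;
+ * synx_util_get_map_entry() picks the global or local map this way.
+ */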
+
+/* coredata memory functions */
+void synx_util_get_object(struct synx_coredata *synx_obj);
+void synx_util_put_object(struct synx_coredata *synx_obj);
+void synx_util_object_destroy(struct synx_coredata *synx_obj);
+
+static inline struct synx_coredata *synx_util_obtain_object(
+	struct synx_handle_coredata *synx_data)
+{
+	if (IS_ERR_OR_NULL(synx_data))
+		return NULL;
+
+	return synx_data->synx_obj;
+}
+
+/* global/local map functions */
+struct synx_map_entry *synx_util_insert_to_map(struct synx_coredata *synx_obj,
+			u32 h_synx, u32 flags);
+struct synx_map_entry *synx_util_get_map_entry(u32 h_synx);
+void synx_util_release_map_entry(struct synx_map_entry *map_entry);
+void synx_util_destroy_map_entry(struct kref *kref);
+/* fence map functions */
+int synx_util_insert_fence_entry(struct synx_fence_entry *entry, u32 *h_synx,
+			u32 global);
+u32 synx_util_get_fence_entry(u64 key, u32 global);
+void synx_util_release_fence_entry(u64 key);
+
+/* coredata initialize functions */
+int synx_util_init_coredata(struct synx_coredata *synx_obj,
+			struct synx_create_params *params,
+			struct dma_fence_ops *ops,
+			u64 dma_context);
+int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
+			struct dma_fence **fences,
+			struct synx_merge_params *params,
+			u32 num_objs,
+			u64 dma_context);
+
+/* handle related functions */
+int synx_alloc_global_handle(u32 *new_synx);
+int synx_alloc_local_handle(u32 *new_synx);
+long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size);
+int synx_util_init_handle(struct synx_client *client, struct synx_coredata *obj,
+			u32 *new_h_synx,
+			void *map_entry);
+
+u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx);
+
+/* callback related functions */
+int synx_util_alloc_cb_entry(struct synx_client *client,
+			struct synx_kernel_payload *data,
+			u32 *cb_idx);
+int synx_util_clear_cb_entry(struct synx_client *client,
+			struct synx_client_cb *cb);
+void synx_util_default_user_callback(u32 h_synx, int status, void *data);
+void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 state);
+void synx_util_cb_dispatch(struct work_struct *cb_dispatch);
+
+/* external fence functions */
+int synx_util_activate(struct synx_coredata *synx_obj);
+int synx_util_add_callback(struct synx_coredata *synx_obj, u32 h_synx);
+
+/* merge related helper functions */
+s32 synx_util_merge_error(struct synx_client *client, u32 *h_synxs, u32 num_objs);
+int synx_util_validate_merge(struct synx_client *client, u32 *h_synxs, u32 num_objs,
+			struct dma_fence ***fences,
+			u32 *fence_cnt);
+
+/* coredata status functions */
+u32 synx_util_get_object_status(struct synx_coredata *synx_obj);
+u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj);
+
+/* client handle map related functions */
+struct synx_handle_coredata *synx_util_acquire_handle(struct synx_client *client,
+			u32 h_synx);
+void synx_util_release_handle(struct synx_handle_coredata *synx_data);
+int synx_util_update_handle(struct synx_client *client, u32 h_synx, u32 sync_id,
+			u32 type, struct synx_handle_coredata **handle);
+void synx_client_destroy(struct kref *kref);
+void synx_util_destroy_handle(struct kref *kref);
+
+/* client memory handler functions */
+struct synx_client *synx_get_client(struct synx_session *session);
+void synx_put_client(struct synx_client *client);
+
+/* error log functions */
+void synx_util_generate_timestamp(char *timestamp, size_t size);
+void synx_util_log_error(u32 id, u32 h_synx, s32 err);
+
+/* external fence map functions */
+int synx_util_save_data(void *fence, u32 flags, u32 data);
+struct synx_entry_64 *synx_util_retrieve_data(void *fence, u32 type);
+void synx_util_remove_data(void *fence, u32 type);
+
+/* misc */
+void synx_util_destroy_data(struct kref *kref);
+void synx_util_map_import_params_to_create(
+			struct synx_import_indv_params *params,
+			struct synx_create_params *c_params);
+
+struct bind_operations *synx_util_get_bind_ops(u32 type);
+u32 synx_util_map_client_id_to_core(enum synx_client_id id);
+
+int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences);
+
+#endif /* __SYNX_UTIL_H__ */

+ 1455 - 0
qcom/opensource/synx-kernel/msm/synx/test/ipclite_test.c

@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/kthread.h>
+#include <linux/string.h>
+#include <linux/bits.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include "ipclite_test.h"
+
+struct kobject *sysfs_dir;
+
+static int threads_started, threads_completed, cores_completed;
+static bool ssr_complete;
+/* data_lock spinlock is used to increment the ping counters in a thread-safe manner.
+ * core_wq ensures all the cores have completed the test before the next step.
+ * ssr_wq is used to wait during an SSR operation.
+ * reply_wq is used to wait for replies to pings sent.
+ * thread_wq is used to wait for all threads local to APPS to complete.
+ * test_done is a completion barrier which ensures a test case is completed.
+ * crash_done is a completion barrier which ensures the SSR crash is completed.
+ */
+DEFINE_SPINLOCK(data_lock);
+DECLARE_WAIT_QUEUE_HEAD(core_wq);
+DECLARE_WAIT_QUEUE_HEAD(ssr_wq);
+DECLARE_WAIT_QUEUE_HEAD(reply_wq);
+DECLARE_WAIT_QUEUE_HEAD(thread_wq);
+DECLARE_COMPLETION(test_done);
+DECLARE_COMPLETION(crash_done);
+
+static struct ipclite_thread_data wakeup_check, bg_pings;
+static struct ipclite_thread_data thread_data;
+
+struct handle_t *handle_ptr;
+static int handle_data[512];
+static struct ipclite_test_data *data;
+
+static void init_test_params(void)
+{
+	data->test_params.wait = 1;
+	data->test_params.num_pings = 1000;
+	data->test_params.num_itr = 1;
+	data->test_params.selected_senders = 1;
+	data->test_params.selected_receivers = 1;
+	data->test_params.enabled_cores = IPCLITE_TEST_ALL_CORES;
+	data->test_params.selected_test_case = 0;
+	data->test_params.num_thread = 1;
+	data->test_params.num_senders = 1;
+	data->test_params.num_receivers = 1;
+}
+
+/* Function to pack the different fields into one 64-bit message value:
+ * 1 byte header of constant pattern 01010101
+ * 1 byte to store the parameter type
+ * 1 byte to store the test case id
+ * 3 bytes to store the value of the parameter in the payload
+ * 1 byte to store test start/stop information
+ * 1 byte to store test pass/fail information
+ */
+static uint64_t get_param_macro(uint64_t parameter_info, uint64_t test_info,
+				uint64_t payload_info, uint64_t start_stop_info,
+				uint64_t pass_fail_info)
+{
+	uint64_t param_macro = 0;
+
+	parameter_info &= GENMASK_ULL(7, 0);
+	test_info &= GENMASK_ULL(7, 0);
+	payload_info &= GENMASK_ULL(23, 0);
+	start_stop_info &= GENMASK_ULL(7, 0);
+	pass_fail_info &= GENMASK_ULL(7, 0);
+
+	param_macro = ((uint64_t)IPCLITE_TEST_HEADER) << 56;
+	param_macro |= parameter_info << 48;
+	param_macro |= test_info << 40;
+	param_macro |= payload_info << 16;
+	param_macro |= start_stop_info << 8;
+	param_macro |= pass_fail_info;
+
+	return param_macro;
+}
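+
+/*
+ * Worked example (illustrative): get_param_macro(TEST_CASE, PING,
+ * PING_SEND, IPCLITE_TEST_START, 0) assembles the 64-bit message as
+ *
+ *	[63:56] IPCLITE_TEST_HEADER	[55:48] TEST_CASE
+ *	[47:40] PING			[39:16] PING_SEND
+ *	[15:8]  IPCLITE_TEST_START	[7:0]   0 (pass/fail)
+ *
+ * and each field can be unpacked again with the matching shift and
+ * GENMASK_ULL() from above.
+ */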
+
+static inline bool is_enabled_core(int core_id)
+{
+	return (data->test_params.enabled_cores & BIT(core_id)) ? true : false;
+}
+
+static inline bool is_selected_receiver(int core_id)
+{
+	return (data->test_params.selected_receivers & BIT(core_id)) ? true : false;
+}
+
+static inline bool is_selected_sender(int core_id)
+{
+	return (data->test_params.selected_senders & BIT(core_id)) ? true : false;
+}
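+
+/*
+ * Note: enabled_cores, selected_senders and selected_receivers are all
+ * bitmasks indexed by host id, e.g. BIT(IPCMEM_APPS) selects the APPS
+ * core.
+ */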
+
+static void ping_receive(struct ipclite_test_data *data)
+{
+	pr_debug("Successfully received a ping\n");
+	data->pings_received[data->client_id]++;
+	wake_up_interruptible(&reply_wq);
+}
+
+static int check_pings(struct ipclite_test_data *data)
+{
+	for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) {
+		if (!is_selected_receiver(i))
+			continue;
+		if (data->pings_sent[i] != data->pings_received[i])
+			return -IPCLITE_TEST_FAIL;
+	}
+	return 0;
+}
+
+static void ping_all_enabled_cores(u64 msg)
+{
+	for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) {
+		if (i == IPCMEM_APPS || !is_enabled_core(i))
+			continue;
+		ipclite_test_msg_send(i, msg);
+	}
+}
+
+static void ping_sel_senders(uint64_t msg)
+{
+	for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) {
+		if (i == IPCMEM_APPS || !(data->test_params.selected_senders & BIT(i)))
+			continue;
+		ipclite_test_msg_send(i, msg);
+	}
+}
+
+static int thread_init(struct ipclite_thread_data *th_data, void *data_ptr, void *fptr)
+{
+	th_data->data = data_ptr;
+	th_data->run = false;
+	init_waitqueue_head(&th_data->wq);
+	th_data->thread = kthread_run(fptr, th_data, "test thread");
+	if (IS_ERR(th_data->thread)) {
+		pr_err("Thread creation failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ping_selected_receivers(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	struct ipclite_test_data *data = t_data->data;
+	int ret = 0;
+	uint64_t macro_to_ping = get_param_macro(TEST_CASE,
+						data->test_params.selected_test_case,
+						PING_SEND, 0, 0);
+	bool fail = false;
+
+	while (!kthread_should_stop()) {
+
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+
+		for (int i = 0; i < data->test_params.num_pings/data->test_params.num_thread; ++i) {
+			for (int j = 0; j < IPCMEM_NUM_HOSTS; ++j) {
+				if (!is_selected_receiver(j))
+					continue;
+				ret = ipclite_test_msg_send(j, macro_to_ping);
+				if (ret == 0) {
+					spin_lock(&data_lock);
+					data->pings_sent[j]++;
+					spin_unlock(&data_lock);
+				} else
+					fail = true;
+				/* If wait is enabled and the number of pings
+				 * to wait on has been sent, wait for replies
+				 * or a timeout.
+				 */
+				if (data->test_params.wait != 0 &&
+							(i+1) % data->test_params.wait == 0) {
+					ret = wait_event_interruptible_timeout(reply_wq,
+								check_pings(data) == 0,
+								msecs_to_jiffies(1000));
+					if (ret < 1)
+						pr_err("Timeout occurred\n");
+				}
+			}
+		}
+		pr_debug("Completed iteration. Marking thread as completed\n");
+		spin_lock(&data_lock);
+		threads_completed++;
+		wake_up_interruptible(&thread_wq);
+		spin_unlock(&data_lock);
+	}
+
+	return fail ? -IPCLITE_TEST_FAIL : 0;
+}
+
+static int negative_tests(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	int ret = 0, fail = 0;
+	uint64_t param;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+		pr_info("Test 1: Sending messages to disabled cores\n");
+		for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) {
+			if (!is_selected_receiver(i))
+				continue;
+			param = get_param_macro(TEST_CASE, NEGATIVE,
+						PING_SEND, 0, 0);
+			ret = ipclite_test_msg_send(i, param);
+			if (ret == 0) {
+				pr_err("TEST FAILED\n");
+				fail++;
+			}
+		}
+		if (!fail)
+			pr_info("TEST PASSED\n");
+
+		pr_info("Test 2: Passing NULL to get_global_partition_info\n");
+		ret = get_global_partition_info(NULL);
+		if (ret == 0) {
+			pr_err("TEST FAILED\n");
+			fail++;
+		} else
+			pr_info("TEST PASSED\n");
+
+		if (fail != 0)
+			pr_err("Negative TEST FAILED\n");
+		else
+			pr_info("Negative TEST PASSED\n");
+
+		param = get_param_macro(TEST_CASE, NEGATIVE, 0,
+					IPCLITE_TEST_STOP, 0);
+		ipclite_test_msg_send(IPCMEM_APPS, param);
+		wait_event_interruptible_timeout(core_wq,
+						cores_completed == data->test_params.num_senders,
+						msecs_to_jiffies(1000));
+		complete(&test_done);
+	}
+	return fail == 0 ? 0 : -IPCLITE_TEST_FAIL;
+}
+
+static int hw_unlock_test(void *hw_mutex_byte)
+{
+	int ret = 0;
+	uint64_t param;
+
+	if (!hw_mutex_byte) {
+		pr_err("Byte for hardware mutex testing is not initialized.\n");
+		return -EFAULT;
+	}
+
+	pr_info("Testing HW Mutex Lock Acquire Functionality\n");
+	*((int *)(hw_mutex_byte)) = -1;
+	pr_debug("The initial value of the byte is %d\n", *((int *)(hw_mutex_byte)));
+	pr_debug("Locking the mutex from APPS Side\n");
+
+	ret = ipclite_hw_mutex_acquire();
+	if (ret != 0) {
+		pr_err("Could not acquire hw mutex from APPS side\n");
+		return ret;
+	}
+
+	pr_debug("Setting the value of the byte to %d\n", IPCMEM_APPS);
+	*((int *)(hw_mutex_byte)) = IPCMEM_APPS;
+	pr_debug("The new value of the byte is %d\n", *((int *)(hw_mutex_byte)));
+
+	for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) {
+		if (i == IPCMEM_APPS || !is_selected_receiver(i))
+			continue;
+		pr_debug("Pinging %s to try and release the locked mutex\n",
+						core_name[i]);
+		param = get_param_macro(TEST_CASE, HW_MUTEX,
+					HW_MUTEX_RELEASE,
+					IPCLITE_TEST_START, 0);
+		ipclite_test_msg_send(i, param);
+		/* wait briefly for the remote core to attempt the release */
+		udelay(1000);
+	}
+
+	if (*((int *)(hw_mutex_byte)) != IPCMEM_APPS)
+		return -IPCLITE_TEST_FAIL;
+
+	ret = ipclite_hw_mutex_release();
+	if (ret != 0)
+		pr_err("Could not release mutex lock successfully\n");
+	return ret;
+}
+
+static int hw_mutex_test(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	struct ipclite_test_data *data = t_data->data;
+	int ret = 0;
+	void *addr = data->global_memory->virt_base;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+
+		ret = hw_unlock_test(addr);
+
+		if (ret == 0)
+			pr_info("HW Unlock Test Passed.\n");
+		else
+			pr_info("HW Unlock Test Failed.\n");
+
+		complete(&test_done);
+	}
+	return ret;
+}
+/* Ping cores which are not selected for ssr in the background */
+static int send_bg_pings(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	struct ipclite_test_data *data = t_data->data;
+	int ret;
+	uint64_t param;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+
+		while (!ssr_complete && !kthread_should_stop()) {
+			for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) {
+				if (i == data->ssr_client || !is_selected_receiver(i))
+					continue;
+				param = get_param_macro(TEST_CASE,
+							SSR,
+							PING_SEND, 0, 0);
+				ret = ipclite_test_msg_send(i, param);
+				if (ret != 0)
+					pr_err("Unable to ping core %d\n", i);
+			}
+			wait_event_interruptible_timeout(ssr_wq,
+							ssr_complete,
+							msecs_to_jiffies(1000));
+		}
+		pr_debug("SSR recovery of core %d completed. Exiting thread\n",
+								data->ssr_client);
+	}
+	return 0;
+}
+/* Wait for 30s and then send pings one by one to see if core wakeup
+ * is completed
+ */
+static int ssr_wakeup_check(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	struct ipclite_test_data *data = t_data->data;
+	int count = 0, ret = 0;
+	uint64_t param;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+
+		ssr_complete = false;
+		msleep_interruptible(30000);
+		while (count < 10) {
+			pr_debug("Sent ping number %d to check if wakeup is completed\n",
+							count);
+			param = get_param_macro(TEST_CASE, SSR,
+						SSR_WAKEUP,
+						IPCLITE_TEST_START, 0);
+			ret = ipclite_test_msg_send(data->ssr_client, param);
+			++count;
+			wait_event_interruptible_timeout(ssr_wq,
+							ssr_complete,
+							msecs_to_jiffies(1000));
+		}
+		if (count == 10 && !ssr_complete) {
+			pr_err("FW Core wakeup failed.\n");
+			/* unblock ssr_test(), which waits on crash_done */
+			complete(&crash_done);
+			return -IPCLITE_TEST_FAIL;
+		}
+		pr_info("FW Core wakeup completed successfully.\n");
+		pr_info("Going for non crashing testing.\n");
+		param = get_param_macro(TEST_CASE, PING, 0,
+					IPCLITE_TEST_START, 0);
+		ipclite_test_msg_send(data->ssr_client, param);
+		complete(&crash_done);
+	}
+	return 0;
+}
+
+static int ssr_test(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	struct ipclite_test_data *data = t_data->data;
+	uint64_t param = 0;
+	int ret = 0;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+
+		ssr_complete = false;
+		ret = thread_init(&wakeup_check, data, ssr_wakeup_check);
+
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			return -EINVAL;
+		}
+
+		ret = thread_init(&bg_pings, data, send_bg_pings);
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			kthread_stop(wakeup_check.thread);
+			return -EINVAL;
+		}
+		pr_info("Starting on SSR test for core %d\n", data->ssr_client);
+		memset(data->pings_sent, 0, sizeof(data->pings_sent));
+		memset(data->pings_received, 0, sizeof(data->pings_received));
+		param = get_param_macro(TEST_CASE, SSR,
+					SSR_CRASHING, IPCLITE_TEST_START, 0);
+		ipclite_test_msg_send(data->ssr_client, param);
+		wait_for_completion(&crash_done);
+		kthread_stop(wakeup_check.thread);
+		kthread_stop(bg_pings.thread);
+		complete(&test_done);
+	}
+	return 0;
+}
+
+static int inc_byte(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	ipclite_atomic_uint32_t *addr = t_data->data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+		for (int i = 0; i < data->test_params.num_itr; ++i)
+			ipclite_global_atomic_inc(addr);
+		threads_completed++;
+		wake_up_interruptible(&thread_wq);
+	}
+	return 0;
+}
+
+static int dec_byte(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	ipclite_atomic_uint32_t *addr = t_data->data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+		for (int i = 0; i < data->test_params.num_itr; ++i)
+			ipclite_global_atomic_dec(addr);
+		threads_completed++;
+		wake_up_interruptible(&thread_wq);
+	}
+	return 0;
+}
+
+static int global_atomics_test(void *byte, int test_number)
+{
+	int ret = 0;
+	int total_increment = 0;
+	uint64_t param;
+	bool fail = false;
+	struct ipclite_thread_data ga_t1, ga_t2;
+
+	if (!byte) {
+		pr_err("Byte not initialized. Test Failed\n");
+		return -EFAULT;
+	}
+	pr_debug("The initial value of the byte is %x\n", *((int *)byte));
+
+	threads_completed = 0;
+	threads_started = 0;
+
+	switch (test_number) {
+	case GLOBAL_ATOMICS_INC:
+		ret = thread_init(&ga_t1, byte, inc_byte);
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			return -EINVAL;
+		}
+		ret = thread_init(&ga_t2, byte, inc_byte);
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			kthread_stop(ga_t1.thread);
+			return -EINVAL;
+		}
+		break;
+	case GLOBAL_ATOMICS_DEC:
+		ret = thread_init(&ga_t1, byte, dec_byte);
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			return -EINVAL;
+		}
+		ret = thread_init(&ga_t2, byte, dec_byte);
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			kthread_stop(ga_t1.thread);
+			return -EINVAL;
+		}
+		break;
+	case GLOBAL_ATOMICS_INC_DEC:
+		ret = thread_init(&ga_t1, byte, inc_byte);
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			return -EINVAL;
+		}
+		ret = thread_init(&ga_t2, byte, dec_byte);
+		if (ret != 0) {
+			pr_err("Thread creation failed\n");
+			kthread_stop(ga_t1.thread);
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("Wrong input provided\n");
+		return -EINVAL;
+	}
+	param = get_param_macro(TEST_CASE,
+				GLOBAL_ATOMIC,
+				test_number,
+				IPCLITE_TEST_START, 0);
+
+	for (int i = 0; i < IPCMEM_NUM_HOSTS; ++i) {
+		if (i == IPCMEM_APPS || !is_selected_receiver(i))
+			continue;
+		ret = ipclite_test_msg_send(i, param);
+		if (ret == 0)
+			threads_started += 2;
+	}
+	if (is_selected_receiver(IPCMEM_APPS)) {
+		ga_t1.run = true;
+		wake_up_interruptible(&ga_t1.wq);
+		ga_t2.run = true;
+		wake_up_interruptible(&ga_t2.wq);
+		threads_started += 2;
+	}
+	/* Wait for all threads to complete or timeout */
+	ret = wait_event_interruptible_timeout(thread_wq,
+					threads_started == 2 * data->test_params.num_receivers &&
+					threads_completed == 2 * data->test_params.num_receivers,
+					msecs_to_jiffies(1000));
+	if (ret < 1)
+		pr_err("Threads could not complete successfully\n");
+
+	pr_debug("The value of the byte is %x\n", *((int *)byte));
+	/* Stopping threads if they have not already completed before evaluation */
+	kthread_stop(ga_t1.thread);
+	kthread_stop(ga_t2.thread);
+
+	total_increment = 2 * data->test_params.num_receivers * data->test_params.num_itr;
+
+	switch (test_number) {
+	case GLOBAL_ATOMICS_INC:
+		if (*((int *)byte) == total_increment)
+			pr_info("Increment Successful.\n");
+		else {
+			pr_err("Increment Failed.\n");
+			fail = true;
+		}
+		break;
+	case GLOBAL_ATOMICS_DEC:
+		if (*((int *)byte) == 0)
+			pr_info("Decrement Successful\n");
+		else {
+			pr_err("Decrement Failed\n");
+			fail = true;
+		}
+		break;
+	case GLOBAL_ATOMICS_INC_DEC:
+		if (*((int *)byte) == 0)
+			pr_info("Increment and Decrement Successful\n");
+		else {
+			pr_err("Increment and Decrement Failed\n");
+			fail = true;
+		}
+		break;
+	default:
+		pr_err("Wrong input provided\n");
+		return -EINVAL;
+	}
+
+	return fail ? -IPCLITE_TEST_FAIL : 0;
+}
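+
+/*
+ * Worked example (illustrative): with two selected receivers and
+ * num_itr = 1000, each receiver runs two increment threads, so a
+ * passing GLOBAL_ATOMICS_INC run leaves the byte at
+ * total_increment = 2 * 2 * 1000 = 4000.
+ */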
+
+static inline uint32_t bitops_count_trailing_one(uint32_t x)
+{
+	uint32_t mask = 0;
+
+	for (int i = 0; i < BITS(ipclite_atomic_uint32_t); i++) {
+		mask = 1 << i;
+		if (!(x & mask))
+			return i;
+	}
+	return BITS(ipclite_atomic_uint32_t);
+}
+
+/**
+ * bitops_util_find_first_zero() - find the first zero in the bitmap
+ * @bmap_addr: pointer to the bitmap
+ * @size: size of the bitmap, in number of bits
+ *
+ * Return: index of the first zero, or @size if the bitmap is full
+ */
+static uint32_t bitops_util_find_first_zero(uint32_t *bmap_addr, uint32_t size)
+{
+	uint32_t res = 0;
+
+	for (int i = 0; i * BITS(ipclite_atomic_uint32_t) < size; i++) {
+		if (bmap_addr[i] != ~(uint32_t)0) {
+			res = i * BITS(ipclite_atomic_uint32_t) +
+				bitops_count_trailing_one(bmap_addr[i]);
+			return res < size ? res : size;
+		}
+	}
+	return size;
+}
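+
+/*
+ * Worked example (illustrative): for a two-word bitmap
+ * { 0xFFFFFFFF, 0x0000000F } and size 64, word 0 is full and word 1
+ * has its four lowest bits set, so the first zero is at index
+ * 1 * 32 + 4 = 36. When every bit below 'size' is set, 'size' itself
+ * is returned, which callers must treat as "no free slot".
+ */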
+
+static int alloc_index(int *bitmap_base)
+{
+	uint32_t prev = 0, index = 0;
+
+	do {
+		index = bitops_util_find_first_zero((unsigned int *) bitmap_base,
+							NUM_HANDLES);
+		if (index >= NUM_HANDLES) {
+			pr_err("No Memory Error. Exiting\n");
+			break;
+		}
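+		/*
+		 * Atomically claim the slot; if another core set this bit
+		 * between the scan above and the test-and-set, the old
+		 * value still has the bit set and the scan is retried.
+		 */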
+		prev = ipclite_global_test_and_set_bit(index % 32,
+					(ipclite_atomic_uint32_t *)(bitmap_base + index/32));
+		if ((prev & (1UL << (index % 32))) == 0)
+			break;
+	} while (true);
+	return index;
+}
+
+void clear_index(int *bitmap_base, uint32_t index)
+{
+	uint32_t addr_idx = index/32, ii = index % 32;
+
+	if (bitmap_base == NULL) {
+		pr_err("Invalid pointer passed\n");
+		return;
+	}
+	ipclite_global_test_and_clear_bit(ii, (ipclite_atomic_uint32_t *)(bitmap_base + addr_idx));
+}
+
+static int global_atomics_test_set_clear(struct ipclite_test_data *data)
+{
+	int index = 0, ret = 0;
+	bool fail = false;
+	uint64_t param;
+
+	handle_ptr = data->global_memory->virt_base;
+	pr_info("Starting global atomics Test 4. Starting allocation of index\n");
+	pr_debug("The total number of handles is %d\n", NUM_HANDLES);
+	pr_debug("Global Base : %p\n", handle_ptr);
+	for (int itr = 0; itr < data->test_params.num_itr; itr++) {
+		threads_started = 0;
+		threads_completed = 0;
+		for (int j = 0; j < IPCMEM_NUM_HOSTS; ++j) {
+			if (j == IPCMEM_APPS || !is_selected_receiver(j))
+				continue;
+			param = get_param_macro(TEST_CASE,
+						GLOBAL_ATOMIC,
+						GLOBAL_ATOMICS_SET_CLR,
+						IPCLITE_TEST_START, 0);
+			ret = ipclite_test_msg_send(j, param);
+			if (ret == 0)
+				threads_started++;
+		}
+		if (is_selected_receiver(IPCMEM_APPS)) {
+			threads_started++;
+			for (int i = 0; i < 512; ++i) {
+				index = alloc_index((int *)handle_ptr);
+				handle_data[i] = index;
+				handle_ptr->handle_data[index] = IPCMEM_APPS;
+			}
+
+			for (int i = 0; i < 512; ++i) {
+				index = handle_data[i];
+				if (handle_ptr->handle_data[index] != IPCMEM_APPS) {
+					pr_err("Handle data has been overwritten.\n");
+					pr_err("This is a bug : Core : %d Index : %d\n",
+						handle_ptr->handle_data[index], index);
+					fail = true;
+				}
+			}
+
+			for (int i = 0; i < 512; ++i) {
+				index = handle_data[i];
+				clear_index((int *)handle_ptr, index);
+			}
+			threads_completed++;
+			if (fail)
+				break;
+		}
+		wait_event_interruptible_timeout(thread_wq,
+				threads_started == data->test_params.num_receivers &&
+				threads_completed == data->test_params.num_receivers,
+				msecs_to_jiffies(1000));
+	}
+	if (!fail)
+		pr_info("Global Atomics Set and Clear test passed successfully\n");
+	return fail ? -IPCLITE_TEST_FAIL  : 0;
+}
+
+static int global_atomics_test_wrapper(void *data_ptr)
+{
+	int result = 0, ret = 0;
+	struct ipclite_thread_data *t_data = data_ptr;
+	struct ipclite_test_data *data = t_data->data;
+	void *addr = data->global_memory->virt_base;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+		*((int *)addr) = 0;
+		result = global_atomics_test(addr, GLOBAL_ATOMICS_INC);
+		result |= global_atomics_test(addr, GLOBAL_ATOMICS_DEC);
+		result |= global_atomics_test(addr, GLOBAL_ATOMICS_INC_DEC);
+		result |= global_atomics_test_set_clear(data);
+		if (result != 0) {
+			pr_err("Global Atomics TEST FAILED\n");
+			ret = -IPCLITE_TEST_FAIL;
+		} else {
+			pr_info("Global Atomics TEST PASSED\n");
+			ret = 0;
+		}
+		complete(&test_done);
+	}
+	return ret;
+}
+
+static int ping_test(void *data_ptr)
+{
+	int ret = 0;
+	uint64_t param_macro;
+	struct ipclite_test_data *data = data_ptr;
+	struct ipclite_thread_data th_arr[IPCLITE_TEST_MAX_THREADS];
+	int count;
+
+	memset(data->pings_sent, 0, sizeof(data->pings_sent));
+	memset(data->pings_received, 0, sizeof(data->pings_received));
+	threads_completed = 0;
+	param_macro = 0;
+	for (count = 0; count < data->test_params.num_thread; ++count) {
+		ret = thread_init(&th_arr[count], data, ping_selected_receivers);
+		if (ret != 0)
+			break;
+	}
+	if (count != data->test_params.num_thread) {
+		while (count > 0) {
+			kthread_stop(th_arr[count-1].thread);
+			--count;
+		}
+	}
+	if (ret != 0) {
+		pr_err("Threads could not be initialized. Ping Test Failed\n");
+		return ret;
+	}
+	for (threads_started = 0; threads_started < data->test_params.num_thread;
+							++threads_started) {
+		th_arr[threads_started].run = true;
+		wake_up_interruptible(&th_arr[threads_started].wq);
+	}
+	ret = wait_event_interruptible_timeout(thread_wq,
+				threads_started == data->test_params.num_thread &&
+				threads_completed == data->test_params.num_thread,
+				msecs_to_jiffies(1000) * data->test_params.num_thread);
+	if (ret < 1) {
+		pr_err("Threads not completed successfully. Only completed %d threads\n",
+						threads_completed);
+		return ret;
+	}
+	pr_info("All threads completed successfully.\n");
+	pr_debug("Going for checking\n");
+	/* Wait for the queue to get processed before checking if all replies are received */
+	if (!data->test_params.wait)
+		msleep_interruptible(1000);
+	ret = check_pings(data);
+
+	if (ret == 0)
+		pr_debug("All replies received successfully.\n");
+	else
+		pr_debug("All replies not received successfully.\n");
+
+	while (count > 0) {
+		kthread_stop(th_arr[count-1].thread);
+		--count;
+	}
+	param_macro = get_param_macro(TEST_CASE, PING, 0,
+					IPCLITE_TEST_STOP, 0);
+	ipclite_test_msg_send(IPCMEM_APPS, param_macro);
+	return ret;
+}
+
+static int wrapper_ping_test(void *data_ptr)
+{
+	int ret = 0;
+	uint64_t param_macro;
+	struct ipclite_thread_data *t_data = data_ptr;
+	struct ipclite_test_data *data = t_data->data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+
+		for (int i = 0; i < data->test_params.num_itr; ++i) {
+			cores_completed = 0;
+			param_macro = get_param_macro(TEST_CASE,
+							PING,
+							0, IPCLITE_TEST_START, 0);
+			/* Ping all senders to start sending messages.
+			 *  If APPS is one of the senders start sending
+			 */
+			ping_sel_senders(param_macro);
+			if (is_selected_sender(IPCMEM_APPS))
+				ping_test(data);
+			wait_event_interruptible_timeout(core_wq,
+						cores_completed == data->test_params.num_senders,
+						msecs_to_jiffies(1000));
+			ret = check_pings(data);
+			if (ret != 0)
+				pr_info("Iteration %d of ping test failed\n", i+1);
+			else
+				pr_info("Iteration %d of ping test passed\n", i+1);
+		}
+		if (is_selected_sender(IPCMEM_APPS))
+			complete(&test_done);
+	}
+	return 0;
+}
+
+static int debug_tests(void *data_ptr)
+{
+	struct ipclite_thread_data *t_data = data_ptr;
+	uint64_t param;
+	int disabled_core = ffz(data->test_params.enabled_cores);
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(t_data->wq, t_data->run);
+		if (kthread_should_stop())
+			break;
+		t_data->run = false;
+		param = get_param_macro(TEST_CASE, DEBUG,
+					PING_SEND, 0, 0);
+		if (disabled_core == IPCMEM_NUM_HOSTS)
+			pr_err("All cores are enabled. No disabled cores\n");
+		/* Ping a disabled core to exercise the error and debug prints */
+		if (disabled_core < IPCMEM_NUM_HOSTS)
+			ipclite_test_msg_send(disabled_core, param);
+
+		param = get_param_macro(TEST_CASE, PING, 0,
+						IPCLITE_TEST_STOP, 0);
+		ipclite_test_msg_send(IPCMEM_APPS, param);
+		wait_event_interruptible_timeout(core_wq,
+						cores_completed == data->test_params.num_senders,
+						msecs_to_jiffies(1000));
+		complete(&test_done);
+	}
+	return 0;
+}
+
+static void ipclite_test_set_enabled_cores(void)
+{
+	if (data->test_params.enabled_cores < 0 ||
+					data->test_params.enabled_cores > IPCLITE_TEST_ALL_CORES) {
+		pr_err("Invalid parameter value given to enabled cores\n");
+		data->test_params.enabled_cores = IPCLITE_TEST_ALL_CORES;
+		return;
+	}
+	pr_info("Enabled cores set to %d\n", data->test_params.enabled_cores);
+}
+
+static void ipclite_test_set_wait(void)
+{
+	uint64_t param;
+
+	if (data->test_params.wait < 0) {
+		pr_err("Invalid parameter value given to wait\n");
+		data->test_params.wait = 1;
+		return;
+	}
+
+	pr_info("wait set to %d\n", data->test_params.wait);
+
+	param = get_param_macro(WAIT, 0, data->test_params.wait, 0, 0);
+	ping_all_enabled_cores(param);
+}
+
+static void ipclite_test_set_num_pings(void)
+{
+	uint64_t param;
+
+	pr_info("num_pings set to %d\n", data->test_params.num_pings);
+
+	param = get_param_macro(NUM_PINGS, 0,
+				data->test_params.num_pings, 0, 0);
+	ping_all_enabled_cores(param);
+}
+
+static void ipclite_test_set_num_itr(void)
+{
+	uint64_t param;
+
+	pr_info("num_itr set to %d\n", data->test_params.num_itr);
+
+	param = get_param_macro(NUM_ITR, 1,
+				data->test_params.num_itr, 0, 0);
+	ping_all_enabled_cores(param);
+}
+
+static void ipclite_test_set_receivers(void)
+{
+	uint64_t param;
+
+	if (data->test_params.selected_receivers < 0 ||
+		data->test_params.selected_receivers > IPCLITE_TEST_ALL_CORES) {
+		pr_err("Invalid parameter value given to selected_receivers\n");
+		data->test_params.selected_receivers = 1;
+		data->test_params.num_receivers = 1;
+		return;
+	}
+	/* Check the number of 1s using the Hamming weight function;
+	 * the number of 1s is the number of receivers, e.g.
+	 * selected_receivers == 0x5 selects cores 0 and 2 (two receivers).
+	 */
+	data->test_params.num_receivers = hweight_long(data->test_params.selected_receivers);
+
+	pr_info("selected_receivers set to %d\n", data->test_params.selected_receivers);
+
+	param = get_param_macro(RECEIVER_LIST, 0,
+				data->test_params.selected_receivers, 0, 0);
+	ping_all_enabled_cores(param);
+}
+
+static void ipclite_test_set_senders(void)
+{
+	if (data->test_params.selected_senders < 0 ||
+		data->test_params.selected_senders > IPCLITE_TEST_ALL_CORES) {
+		pr_err("Invalid parameter value given to selected_senders\n");
+		data->test_params.selected_senders = 1;
+		data->test_params.num_senders = 1;
+		return;
+	}
+
+	/* Check the number of 1s using the Hamming weight function. */
+	data->test_params.num_senders = hweight_long(data->test_params.selected_senders);
+
+	pr_info("selected_senders set to %d\n", data->test_params.selected_senders);
+}
+
+static void ipclite_test_set_num_threads(void)
+{
+	uint64_t param;
+
+	if (data->test_params.num_thread < 0 ||
+					data->test_params.num_thread > IPCLITE_TEST_MAX_THREADS) {
+		pr_err("Invalid parameter value given to num_thread\n");
+		data->test_params.num_thread = 1;
+		return;
+	}
+
+	pr_info("num_thread set to %d\n", data->test_params.num_thread);
+
+	param = get_param_macro(NUM_THREADS, 0,
+				data->test_params.num_thread, 0, 0);
+	ping_all_enabled_cores(param);
+}
+
+static void ipclite_test_set_test(void)
+{
+	uint64_t param;
+	int ret = 0;
+
+	if (data->test_params.selected_test_case < 0 || data->test_params.selected_test_case > 8) {
+		pr_err("Invalid parameter value given to test_case\n");
+		data->test_params.selected_test_case = 0;
+		return;
+	}
+
+	pr_info("selected_test_case set to %d\n", data->test_params.selected_test_case);
+	param = get_param_macro(TEST_CASE,
+				data->test_params.selected_test_case, 0,
+				IPCLITE_TEST_START, 0);
+
+	switch (data->test_params.selected_test_case) {
+	case PING:
+		ret = thread_init(&thread_data, data, wrapper_ping_test);
+		if (ret != 0) {
+			pr_err("Could not create thread for testing\n");
+			return;
+		}
+		thread_data.run = true;
+		wake_up_interruptible(&thread_data.wq);
+		break;
+	case NEGATIVE:
+		ping_sel_senders(param);
+		if (is_selected_sender(IPCMEM_APPS)) {
+			pr_info("Starting test %d for core %s\n",
+				NEGATIVE, core_name[IPCMEM_APPS]);
+			ret = thread_init(&thread_data, data, negative_tests);
+			if (ret != 0) {
+				pr_err("Could not create thread for testing\n");
+				return;
+			}
+			thread_data.run = true;
+			wake_up_interruptible(&thread_data.wq);
+		}
+		break;
+	case GLOBAL_ATOMIC:
+		ret = thread_init(&thread_data, data, global_atomics_test_wrapper);
+		if (ret != 0) {
+			pr_err("Could not create thread for testing\n");
+			return;
+		}
+		thread_data.run = true;
+		wake_up_interruptible(&thread_data.wq);
+		break;
+	case DEBUG:
+		ping_sel_senders(param);
+		if (is_selected_sender(IPCMEM_APPS)) {
+			ret = thread_init(&thread_data, data, debug_tests);
+			if (ret != 0) {
+				pr_err("Could not create thread for testing\n");
+				return;
+			}
+			thread_data.run = true;
+			wake_up_interruptible(&thread_data.wq);
+		}
+		break;
+	case SSR:
+		if (data->test_params.num_senders != 1) {
+			pr_err("SSR Testing requires only 1 core to be selected\n");
+			return;
+		}
+		/* Find first set (ffs) to get the bit position/index of sender */
+		data->ssr_client = ffs(data->test_params.selected_senders) - 1;
+		if (data->ssr_client == 0 || !is_enabled_core(data->ssr_client)) {
+			pr_err("Invalid core selected for SSR Testing\n");
+			return;
+		}
+		pr_info("Starting test %d for core %s\n",
+			SSR, core_name[data->ssr_client]);
+		ret = thread_init(&thread_data, data, ssr_test);
+		if (ret != 0) {
+			pr_err("Could not create thread for testing\n");
+			return;
+		}
+		thread_data.run = true;
+		wake_up_interruptible(&thread_data.wq);
+		break;
+	case HW_MUTEX:
+		if (data->test_params.num_senders != 1) {
+			pr_err("HW Mutex Testing requires only 1 core to be selected\n");
+			return;
+		}
+
+		if (is_selected_sender(IPCMEM_APPS)) {
+			pr_info("Starting test %d for core %s\n",
+				HW_MUTEX, core_name[IPCMEM_APPS]);
+			ret = thread_init(&thread_data, data, hw_mutex_test);
+			if (ret != 0) {
+				pr_err("Could not create thread for testing\n");
+				return;
+			}
+			thread_data.run = true;
+			wake_up_interruptible(&thread_data.wq);
+		} else {
+			ping_sel_senders(param);
+		}
+		break;
+	default:
+		pr_err("Wrong input provided\n");
+		return;
+	}
+	wait_for_completion(&test_done);
+	if (thread_data.thread != NULL) {
+		ret = kthread_stop(thread_data.thread);
+		/* Clear the stale pointer so a later run does not stop a dead thread */
+		thread_data.thread = NULL;
+	}
+	if (ret != 0)
+		pr_err("Test did not complete successfully\n");
+	else
+		pr_info("Test completed successfully\n");
+}
+
+static int parse_param(char **temp_buf, int *addr)
+{
+	char *token;
+	int ret;
+
+	token = strsep(temp_buf, " ");
+	if (!token) {
+		pr_err("Token value is NULL in parse param\n");
+		return -EINVAL;
+	}
+	ret = kstrtoint(token, 0, addr);
+	if (ret < 0) {
+		pr_err("Parameter value not read correctly\n");
+		return ret;
+	}
+	return 0;
+}
+
+static ssize_t ipclite_test_params_write(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buf, size_t count)
+{
+	char *temp_buf = kmalloc(strlen(buf)+1, GFP_KERNEL);
+	char *temp_ptr = temp_buf;
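+	/* strsep() advances temp_buf, so keep temp_ptr to free the original allocation */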
+	int ret, param = 0;
+
+	if (!temp_buf) {
+		pr_err("Memory not allocated\n");
+		return -ENOMEM;
+	}
+
+	ret = strscpy(temp_buf, buf, strlen(buf)+1);
+
+	if (ret < 0) {
+		pr_err("User input is too large\n");
+		goto exit;
+	}
+
+	ret = parse_param(&temp_buf, &param);
+	if (ret != 0)
+		goto exit;
+
+	if (param == ENABLED_CORES) {
+		ret = parse_param(&temp_buf, &data->test_params.enabled_cores);
+		if (ret == 0)
+			ipclite_test_set_enabled_cores();
+		goto exit;
+	}
+
+	data->test_params.selected_test_case = param;
+
+	switch (data->test_params.selected_test_case) {
+	case PING:
+		ret = parse_param(&temp_buf, &data->test_params.selected_senders);
+		if (ret != 0)
+			break;
+		ipclite_test_set_senders();
+		ret = parse_param(&temp_buf, &data->test_params.selected_receivers);
+		if (ret != 0)
+			break;
+		ipclite_test_set_receivers();
+		ret = parse_param(&temp_buf, &data->test_params.num_pings);
+		if (ret != 0)
+			break;
+		ipclite_test_set_num_pings();
+		ret = parse_param(&temp_buf, &data->test_params.wait);
+		if (ret != 0)
+			break;
+		ipclite_test_set_wait();
+		ret = parse_param(&temp_buf, &data->test_params.num_itr);
+		if (ret != 0)
+			break;
+		ipclite_test_set_num_itr();
+		ret = parse_param(&temp_buf, &data->test_params.num_thread);
+		if (ret != 0)
+			break;
+		ipclite_test_set_num_threads();
+		break;
+	case NEGATIVE:
+		ret = parse_param(&temp_buf, &data->test_params.selected_senders);
+		if (ret != 0)
+			break;
+		ipclite_test_set_senders();
+		ret = parse_param(&temp_buf, &data->test_params.selected_receivers);
+		if (ret != 0)
+			break;
+		ipclite_test_set_receivers();
+		break;
+	case GLOBAL_ATOMIC:
+		ret = parse_param(&temp_buf, &data->test_params.selected_receivers);
+		if (ret != 0)
+			break;
+		ipclite_test_set_receivers();
+		ret = parse_param(&temp_buf, &data->test_params.num_itr);
+		if (ret != 0)
+			break;
+		ipclite_test_set_num_itr();
+		break;
+	case DEBUG:
+		ret = parse_param(&temp_buf, &data->test_params.selected_senders);
+		if (ret != 0)
+			break;
+		ipclite_test_set_senders();
+		break;
+	case SSR:
+		ret = parse_param(&temp_buf, &data->test_params.selected_senders);
+		if (ret != 0)
+			break;
+		ipclite_test_set_senders();
+		ret = parse_param(&temp_buf, &data->test_params.selected_receivers);
+		if (ret != 0)
+			break;
+		ipclite_test_set_receivers();
+		ret = parse_param(&temp_buf, &data->test_params.num_pings);
+		if (ret != 0)
+			break;
+		ipclite_test_set_num_pings();
+		break;
+	case HW_MUTEX:
+		ret = parse_param(&temp_buf, &data->test_params.selected_senders);
+		if (ret != 0)
+			break;
+		ipclite_test_set_senders();
+		ret = parse_param(&temp_buf, &data->test_params.selected_receivers);
+		if (ret != 0)
+			break;
+		ipclite_test_set_receivers();
+		break;
+	default:
+		pr_err("Wrong input provided\n");
+		goto exit;
+	}
+	if (ret == 0)
+		ipclite_test_set_test();
+exit:
+	kfree(temp_ptr);
+	return count;
+}
+
+static int ipclite_test_callback_fn(unsigned int client_id, long long msg,
+					void *data_ptr)
+{
+	struct ipclite_test_data *data = data_ptr;
+	uint64_t header, parameter_info, test_info, payload_info,
+			start_stop_info, pass_fail_info;
+	uint64_t reply_macro;
+	int ret = 0;
+
+	/* Unpack the different bit fields from message value */
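+	/*
+	 * Message layout (from the masks below):
+	 *   [63:56] header (must be IPCLITE_TEST_HEADER)
+	 *   [55:48] parameter info
+	 *   [47:40] test case id
+	 *   [39:16] payload (ping type, subtest id, etc.)
+	 *   [15:8]  start/stop flag
+	 *   [7:0]   pass/fail flag
+	 */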
+	header = (msg & GENMASK(63, 56))>>56;
+	parameter_info = (msg & GENMASK(55, 48))>>48;
+	test_info = (msg & GENMASK(47, 40))>>40;
+	payload_info = (msg & GENMASK(39, 16))>>16;
+	start_stop_info = (msg & GENMASK(15, 8))>>8;
+	pass_fail_info = (msg & GENMASK(7, 0));
+
+	if (!data) {
+		pr_err("Callback data pointer not loaded successfully\n");
+		return -EFAULT;
+	}
+
+	data->client_id = client_id;
+
+	if (header != IPCLITE_TEST_HEADER) {
+		pr_err("Corrupted message packed received\n");
+		return -EINVAL;
+	}
+
+	pr_debug("The message received is %lx\n", msg);
+
+	switch (test_info) {
+	case PING:
+	case NEGATIVE:
+	case DEBUG:
+		if (payload_info == PING_SEND) {
+			reply_macro = get_param_macro(TEST_CASE,
+							test_info,
+							PING_REPLY,
+							0, 0);
+			ipclite_test_msg_send(client_id, reply_macro);
+			break;
+		}
+		if (payload_info == PING_REPLY) {
+			ping_receive(data);
+			break;
+		}
+		if (pass_fail_info == IPCLITE_TEST_PASS)
+			pr_info("Test passed on core %s\n", core_name[client_id]);
+		else if (pass_fail_info == IPCLITE_TEST_FAIL)
+			pr_info("Test failed on core %s\n", core_name[client_id]);
+		if (start_stop_info == IPCLITE_TEST_STOP) {
+			++cores_completed;
+			if (cores_completed == data->test_params.num_senders)
+				pr_info("Test completed on all cores\n");
+			if (is_selected_sender(IPCMEM_APPS))
+				wake_up_interruptible(&core_wq);
+			else
+				complete(&test_done);
+		}
+		break;
+	case HW_MUTEX:
+		if (start_stop_info == IPCLITE_TEST_START) {
+			ret = ipclite_hw_mutex_release();
+			if (ret == 0)
+				*((int *)data->global_memory->virt_base) = IPCMEM_APPS;
+			reply_macro = get_param_macro(TEST_CASE,
+							test_info,
+							HW_MUTEX_RELEASE,
+							IPCLITE_TEST_STOP, 0);
+			ipclite_test_msg_send(client_id, reply_macro);
+
+		}
+		if (pass_fail_info == IPCLITE_TEST_PASS)
+			pr_info("HW Unlock Test passed on core %s\n",
+					core_name[client_id]);
+		else if (pass_fail_info == IPCLITE_TEST_FAIL)
+			pr_info("HW Unlock Test failed on core %s\n",
+					core_name[client_id]);
+		if (start_stop_info == IPCLITE_TEST_STOP)
+			complete(&test_done);
+		break;
+	case SSR:
+		if (payload_info == PING_SEND) {
+			reply_macro = get_param_macro(TEST_CASE,
+							test_info,
+							PING_REPLY,
+							0, 0);
+			data->pings_received[client_id]++;
+			ipclite_test_msg_send(client_id, reply_macro);
+			if (data->pings_received[client_id] == data->test_params.num_pings) {
+				pr_info("Waking up ssr_wakeup_check_thread.\n");
+				pr_info("Signaling other cores to make sure there is no other crash\n");
+				wakeup_check.run = true;
+				wake_up_interruptible(&wakeup_check.wq);
+				bg_pings.run = true;
+				wake_up_interruptible(&bg_pings.wq);
+			}
+		}
+		if (payload_info == SSR_WAKEUP) {
+			if (start_stop_info == IPCLITE_TEST_STOP) {
+				ssr_complete = true;
+				pr_info("%s wakeup completed\n",
+						core_name[client_id]);
+				wake_up_interruptible(&ssr_wq);
+			}
+		}
+		if (pass_fail_info == IPCLITE_TEST_PASS)
+			pr_info("Test %d passed on core %s\n",
+						test_info, core_name[client_id]);
+		else if (pass_fail_info == IPCLITE_TEST_FAIL)
+			pr_info("Test %d failed on core %s\n",
+					test_info, core_name[client_id]);
+		break;
+	case GLOBAL_ATOMIC:
+		if (start_stop_info == IPCLITE_TEST_STOP) {
+			pr_debug("%s completed Global Atomics Test.\n",
+						core_name[client_id]);
+			if (payload_info == GLOBAL_ATOMICS_SET_CLR)
+				threads_completed++;
+			else
+				threads_completed += 2;
+			wake_up_interruptible(&thread_wq);
+		}
+		break;
+	default:
+		pr_info("Wrong input given\n");
+	}
+	return 0;
+}
+
+struct kobj_attribute ipclite_test_params = __ATTR(ipclite_test_params,
+							0660,
+							NULL,
+							ipclite_test_params_write);
+
+static int ipclite_test_sysfs_node_setup(void)
+{
+	int ret = 0;
+
+	sysfs_dir = kobject_create_and_add("ipclite_test", kernel_kobj);
+	if (sysfs_dir == NULL) {
+		pr_err("Cannot create sysfs directory\n");
+		return -ENOENT;
+	}
+
+	ret = sysfs_create_file(sysfs_dir, &ipclite_test_params.attr);
+	if (ret) {
+		pr_err("Cannot create sysfs file for ipclite test module. Error - %d\n",
+			ret);
+		kobject_put(sysfs_dir);
+		return -ENOENT;
+	}
+	return 0;
+}
+
+static int __init ipclite_test_init(void)
+{
+	int ret = 0;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	data->global_memory = kzalloc(sizeof(*(data->global_memory)),
+						GFP_KERNEL);
+	if (!data->global_memory) {
+		kfree(data);
+		data = NULL;
+		return -ENOMEM;
+	}
+	ret = get_global_partition_info(data->global_memory);
+	if (ret != 0) {
+		pr_err("Unable to load global partition information\n");
+		goto bail;
+	}
+
+	ret = ipclite_register_test_client(ipclite_test_callback_fn, data);
+	if (ret != 0) {
+		pr_err("Could not register client\n");
+		goto bail;
+	}
+
+	ret = ipclite_test_sysfs_node_setup();
+	if (ret != 0) {
+		pr_err("Failed to create sysfs interface\n");
+		goto bail;
+	}
+
+	init_test_params();
+	return 0;
+bail:
+	kfree(data->global_memory);
+	kfree(data);
+	data = NULL;
+	return ret;
+}
+
+static void __exit ipclite_test_exit(void)
+{
+	pr_info("Removing IPCLite Test Module\n");
+	sysfs_remove_file(sysfs_dir, &ipclite_test_params.attr);
+	kobject_put(sysfs_dir);
+	kfree(data->global_memory);
+	kfree(data);
+	data = NULL;
+}
+
+module_init(ipclite_test_init);
+module_exit(ipclite_test_exit);
+
+MODULE_LICENSE("GPL v2");

+ 118 - 0
qcom/opensource/synx-kernel/msm/synx/test/ipclite_test.h

@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "../ipclite_client.h"
+#include "../ipclite.h"
+
+/* General testing related configurations */
+#define IPCLITE_TEST_MAX_THREADS 5
+#define IPCLITE_TEST_HEADER 0xaa
+#define IPCLITE_TEST_ALL_CORES GENMASK(IPCMEM_NUM_HOSTS - 1, 0)
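+/* e.g. with the nine hosts listed in core_name[] below, this evaluates to 0x1ff */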
+
+/* Synx Usecase related definitions */
+#define NUM_HANDLES   4096
+#define BITMAP_SIZE   (NUM_HANDLES/32)
+#define BITS(x) (sizeof(x)*8)
+
+struct handle_t {
+	int handle_bitmap[BITMAP_SIZE];
+	int handle_data[NUM_HANDLES];
+};
+
+/* Flags for Pass, Fail, Start, and Stop */
+#define IPCLITE_TEST_PASS 2
+#define IPCLITE_TEST_FAIL 1
+
+#define IPCLITE_TEST_START 2
+#define IPCLITE_TEST_STOP 1
+
+/* List of Cases Available for Testing */
+enum ipclite_test_type {
+	PING		= 1,
+	NEGATIVE	= 2,
+	GLOBAL_ATOMIC	= 3,
+	DEBUG		= 4,
+	SSR		= 5,
+	HW_MUTEX	= 6,
+};
+
+/* List of sysfs parameters */
+enum ipclite_test_param {
+	TEST_CASE	= 1,
+	SENDER_LIST	= 2,
+	RECEIVER_LIST	= 3,
+	NUM_PINGS	= 4,
+	WAIT		= 5,
+	NUM_ITR		= 6,
+	NUM_THREADS	= 7,
+	ENABLED_CORES	= 8,
+};
+
+/* List of subtests for HW Mutex Test */
+enum ipclite_test_hw_mutex_subtest {
+	HW_MUTEX_RELEASE	= 1,
+};
+
+/* List of messages for SSR Testing */
+enum ipclite_test_ssr_subtest {
+	SSR_CRASHING	= 1,
+	SSR_WAKEUP	= 2,
+};
+
+/* List of subtest for Global Atomics Testing */
+enum ipclite_test_global_atomics_subtest {
+	GLOBAL_ATOMICS_INC	= 1,
+	GLOBAL_ATOMICS_DEC	= 2,
+	GLOBAL_ATOMICS_INC_DEC	= 3,
+	GLOBAL_ATOMICS_SET_CLR	= 4,
+};
+
+/* Types of pings and replies to be sent and received */
+enum ipclite_test_ping {
+	PING_SEND	= 10,
+	PING_REPLY	= 11,
+};
+
+static char core_name[IPCMEM_NUM_HOSTS][13] = {
+					"IPCMEM_APPS",
+					"IPCMEM_MODEM",
+					"IPCMEM_LPASS",
+					"IPCMEM_SLPI",
+					"IPCMEM_GPU",
+					"IPCMEM_CDSP",
+					"IPCMEM_CVP",
+					"IPCMEM_CAM",
+					"IPCMEM_VPU"
+};
+
+struct ipclite_test_params {
+	int wait;
+	int num_pings;
+	int num_itr;
+	int selected_senders;
+	int selected_receivers;
+	int selected_test_case;
+	int enabled_cores;
+	int num_thread;
+	int num_senders;
+	int num_receivers;
+};
+
+struct ipclite_test_data {
+	int pings_sent[IPCMEM_NUM_HOSTS];
+	int pings_received[IPCMEM_NUM_HOSTS];
+	int client_id;
+	struct global_region_info *global_memory;
+	struct ipclite_test_params test_params;
+	int ssr_client;
+};
+
+struct ipclite_thread_data {
+	struct task_struct *thread;
+	void *data;
+	wait_queue_head_t wq;
+	bool run;
+};
+
+static int ipclite_test_callback_fn(unsigned int client_id, long long msg, void *d);

+ 16 - 0
qcom/opensource/synx-kernel/pineapple.bzl

@@ -0,0 +1,16 @@
+load(":synx_modules.bzl", "synx_modules")
+load(":synx_module_build.bzl", "define_consolidate_gki_modules")
+
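+# Register the synx-driver, ipclite and ipclite_test DDK modules for both the
+# pineapple "consolidate" and "gki" kernel variants.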
+def define_pineapple():
+    define_consolidate_gki_modules(
+        target = "pineapple",
+        registry = synx_modules,
+        modules = [
+            "synx-driver",
+            "ipclite",
+            "ipclite_test",
+        ],
+        config_options = [
+            "TARGET_SYNX_ENABLE",
+        ],
+    )

+ 28 - 0
qcom/opensource/synx-kernel/synx_kernel_board.mk

@@ -0,0 +1,28 @@
+# Build synx kernel driver
+
+TARGET_SYNX_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
+		TARGET_SYNX_ENABLE := true
+	endif
+else
+TARGET_SYNX_ENABLE := true
+endif
+
+ifneq (,$(call is-board-platform-in-list2,volcano))
+TARGET_SYNX_ENABLE := false
+endif
+ifneq (,$(call is-board-platform-in-list2,pitti))
+TARGET_SYNX_ENABLE := false
+endif
+ifeq ($(TARGET_SYNX_ENABLE), true)
+ifneq (,$(call is-board-platform-in-list2,$(TARGET_BOARD_PLATFORM)))
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite_test.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipclite.ko
+#BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko
+endif
+endif
+

+ 18 - 0
qcom/opensource/synx-kernel/synx_kernel_product.mk

@@ -0,0 +1,18 @@
+TARGET_SYNX_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
+		TARGET_SYNX_ENABLE := true
+	endif
+else
+TARGET_SYNX_ENABLE := true
+endif
+
+ifneq (,$(call is-board-platform-in-list2,volcano))
+TARGET_SYNX_ENABLE := false
+endif
+ifneq (,$(call is-board-platform-in-list2,pitti))
+TARGET_SYNX_ENABLE := false
+endif
+ifeq ($(TARGET_SYNX_ENABLE), true)
+PRODUCT_PACKAGES += synx-driver.ko
+endif

+ 128 - 0
qcom/opensource/synx-kernel/synx_module_build.bzl

@@ -0,0 +1,128 @@
+load(
+    "//build/kernel/kleaf:kernel.bzl",
+    "ddk_module",
+    "ddk_submodule",
+    "kernel_module",
+    "kernel_modules_install",
+)
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+
+def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps):
+    processed_config_srcs = {}
+    processed_config_deps = {}
+
+    for config_src_name in config_srcs:
+        config_src = config_srcs[config_src_name]
+
+        if type(config_src) == "list":
+            processed_config_srcs[config_src_name] = {True: config_src}
+        else:
+            processed_config_srcs[config_src_name] = config_src
+
+    for config_deps_name in config_deps:
+        config_dep = config_deps[config_deps_name]
+
+        if type(config_dep) == "list":
+            processed_config_deps[config_deps_name] = {True: config_dep}
+        else:
+            processed_config_deps[config_deps_name] = config_dep
+
+    module = struct(
+        name = name,
+        path = path,
+        srcs = srcs,
+        config_srcs = processed_config_srcs,
+        config_option = config_option,
+        deps = deps,
+        config_deps = processed_config_deps,
+    )
+
+    module_map[name] = module
+
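+# Flatten the config-gated maps: for every config option, pick the list
+# registered for "enabled" when the option is selected, otherwise the
+# "disabled" list (defaulting to an empty list).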
+def _get_config_choices(map, options):
+    choices = []
+
+    for option in map:
+        choices.extend(map[option].get(option in options, []))
+
+    return choices
+
+def _get_kernel_build_options(modules, config_options):
+    all_options = {option: True for option in config_options}
+    all_options = all_options | {module.config_option: True for module in modules if module.config_option}
+
+    return all_options
+
+def _get_kernel_build_module_srcs(module, options, formatter):
+    srcs = module.srcs + _get_config_choices(module.config_srcs, options)
+    module_path = "{}/".format(module.path) if module.path else ""
+    globbed_srcs = native.glob(["{}{}".format(module_path, formatter(src)) for src in srcs])
+
+    return globbed_srcs
+
+def _get_kernel_build_module_deps(module, options, formatter):
+    deps = module.deps + _get_config_choices(module.config_deps, options)
+    deps = [formatter(dep) for dep in deps]
+
+    return deps
+
+def create_module_registry(hdrs = []):
+    module_map = {}
+
+    def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps = [], config_deps = {}):
+        _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps)
+
+    return struct(
+        register = register,
+        get = module_map.get,
+        hdrs = hdrs,
+        module_map = module_map,
+    )
+
+def define_target_variant_modules(target, variant, registry, modules, config_options = []):
+    kernel_build = "{}_{}".format(target, variant)
+    kernel_build_label = "//msm-kernel:{}".format(kernel_build)
+    modules = [registry.get(module_name) for module_name in modules]
+    options = _get_kernel_build_options(modules, config_options)
+    build_print = lambda message: print("{}: {}".format(kernel_build, message))
+    formatter = lambda s: s.replace("%b", kernel_build).replace("%t", target)
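+    # "%b" in registered srcs/deps expands to "<target>_<variant>", "%t" to the target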
+
+    headers = ["//msm-kernel:all_headers"] + registry.hdrs
+    all_module_rules = []
+
+    for module in modules:
+        rule_name = "{}_{}".format(kernel_build, module.name)
+        module_srcs = _get_kernel_build_module_srcs(module, options, formatter)
+
+        if not module_srcs:
+            continue
+
+        ddk_submodule(
+            name = rule_name,
+            srcs = module_srcs,
+            out = "{}.ko".format(module.name),
+            deps = headers + _get_kernel_build_module_deps(module, options, formatter),
+            local_defines = options.keys(),
+        )
+
+        all_module_rules.append(rule_name)
+
+    ddk_module(
+        name = "{}_modules".format(kernel_build),
+        kernel_build = kernel_build_label,
+        deps = all_module_rules,
+    )
+
+    copy_to_dist_dir(
+        name = "{}_modules_dist".format(kernel_build),
+        data = [":{}_modules".format(kernel_build)],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(kernel_build),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+    )
+
+def define_consolidate_gki_modules(target, registry, modules, config_options = []):
+    define_target_variant_modules(target, "consolidate", registry, modules, config_options)
+    define_target_variant_modules(target, "gki", registry, modules, config_options)

+ 33 - 0
qcom/opensource/synx-kernel/synx_modules.bzl

@@ -0,0 +1,33 @@
+load(":synx_module_build.bzl", "create_module_registry")
+
+SYNX_KERNEL_ROOT = "synx-kernel"
+
+synx_modules = create_module_registry([":synx_headers"])
+register_synx_module = synx_modules.register
+
+register_synx_module(
+    name = "synx-driver",
+    path = "msm",
+    srcs = [
+        "synx/synx.c",
+        "synx/synx_global.c",
+        "synx/synx_util.c",
+        "synx/synx_debugfs.c",
+        "synx/synx_debugfs_util.c",
+    ],
+)
+
+register_synx_module(
+    name = "ipclite",
+    path = "msm",
+    srcs = [
+        "synx/ipclite.c",
+    ],
+)
+register_synx_module(
+    name = "ipclite_test",
+    path = "msm",
+    srcs = [
+        "synx/test/ipclite_test.c",
+    ],
+)