
msm: synx: build script as DLKM for Vendor SI

Added Android.mk, Kbuild, and Makefile scripts to support building the synx driver as an external module for Vendor SI.

Change-Id: Ib66325d115ca46e6b61de1e168e85d09419f73e2
Signed-off-by: NITIN LAXMIDAS NAIK <[email protected]>
NITIN LAXMIDAS NAIK · 3 years ago · commit 25cb61693a

+ 5 - 0
Android.bp

@@ -0,0 +1,5 @@
+cc_library_headers {
+    name: "qti_synx_kernel_headers",
+    export_include_dirs: ["include/uapi/synx/media"],
+    vendor_available: true
+}

+ 53 - 0
Android.mk

@@ -0,0 +1,53 @@
+TARGET_SYNX_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
+		TARGET_SYNX_ENABLE := true
+	endif
+else
+TARGET_SYNX_ENABLE := true
+endif
+
+ifeq ($(TARGET_SYNX_ENABLE),true)
+SYNX_BLD_DIR := $(TOP)/vendor/qcom/opensource/synx-kernel
+
+
+# Build synx-driver.ko
+###########################################################
+# This is set once per LOCAL_PATH, not per (kernel) module
+KBUILD_OPTIONS := SYNX_ROOT=$(SYNX_BLD_DIR)
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+###########################################################
+
+DLKM_DIR   := $(TOP)/device/qcom/common/dlkm
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES           := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+LOCAL_MODULE              := synx-driver-symvers
+LOCAL_MODULE_KBUILD_NAME  := Module.symvers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+# Include the module in /vendor/lib/modules (vendor.img)
+# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
+LOCAL_MODULE      := synx-driver.ko
+LOCAL_MODULE_KBUILD_NAME := msm/synx-driver.ko
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+
+# print out variables
+$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS))
+$(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY))
+$(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES))
+$(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES))
+$(info DLKM_DIR = $(DLKM_DIR))
+
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+
+endif # End of check for TARGET_SYNX_ENABLE

+ 6 - 0
Kbuild

@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+CONFIG_BUILD_VENDORSI := true
+
+# auto-detect subdirs
+obj-y += msm/

+ 11 - 0
Makefile

@@ -0,0 +1,11 @@
+KBUILD_OPTIONS += SYNX_ROOT=$(KERNEL_SRC)/$(M)
+
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+modules_install:
+	$(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
+clean:
+	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
+	rm -rf .tmp_versions

+ 5 - 0
config/pineapplesynx.conf

@@ -0,0 +1,5 @@
+ifeq ($(CONFIG_QGKI),y)
+export TARGET_SYNX_ENABLE=y
+else
+export TARGET_SYNX_ENABLE=m
+endif

+ 6 - 0
config/pineapplesynxconf.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define TARGET_SYNX_ENABLE 1

+ 326 - 0
include/uapi/synx/media/synx_header.h

@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __UAPI_SYNX_H__
+#define __UAPI_SYNX_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Size of opaque payload sent to kernel for safekeeping until signal time */
+#define SYNX_USER_PAYLOAD_SIZE               4
+
+#define SYNX_MAX_WAITING_SYNX                16
+
+#define SYNX_CALLBACK_RESULT_SUCCESS         2
+#define SYNX_CALLBACK_RESULT_FAILED          3
+#define SYNX_CALLBACK_RESULT_CANCELED        4
+
+/**
+ * struct synx_info - Sync object creation information
+ *
+ * @name     : Optional string representation of the synx object
+ * @synx_obj : Sync object returned after creation in kernel
+ */
+struct synx_info {
+	char name[64];
+	__s32 synx_obj;
+};
+
+/**
+ * struct synx_userpayload_info - Payload info from user space
+ *
+ * @synx_obj:   Sync object for which the payload has to be registered
+ * @reserved:   Reserved
+ * @payload:    Pointer to user payload
+ */
+struct synx_userpayload_info {
+	__s32 synx_obj;
+	__u32 reserved;
+	__u64 payload[SYNX_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct synx_signal - Sync object signaling struct
+ *
+ * @synx_obj   : Sync object to be signaled
+ * @synx_state : State of the synx object to which it should be signaled
+ */
+struct synx_signal {
+	__s32 synx_obj;
+	__u32 synx_state;
+};
+
+/**
+ * struct synx_merge - Merge information for synx objects
+ *
+ * @synx_objs :  Pointer to synx object array to merge
+ * @num_objs  :  Number of objects in the array
+ * @merged    :  Merged synx object
+ */
+struct synx_merge {
+	__u64 synx_objs;
+	__u32 num_objs;
+	__s32 merged;
+};
+
+/**
+ * struct synx_wait - Sync object wait information
+ *
+ * @synx_obj   : Sync object to wait on
+ * @reserved   : Reserved
+ * @timeout_ms : Timeout in milliseconds
+ */
+struct synx_wait {
+	__s32 synx_obj;
+	__u32 reserved;
+	__u64 timeout_ms;
+};
+
+/**
+ * struct synx_external_desc - info of external sync object
+ *
+ * @type     : Synx type
+ * @reserved : Reserved
+ * @id       : Sync object id
+ *
+ */
+struct synx_external_desc {
+	__u32 type;
+	__u32 reserved;
+	__s32 id[2];
+};
+
+/**
+ * struct synx_bind - info for binding two synx objects
+ *
+ * @synx_obj      : Synx object
+ * @reserved      : Reserved
+ * @ext_sync_desc : External synx to bind to
+ *
+ */
+struct synx_bind {
+	__s32 synx_obj;
+	__u32 reserved;
+	struct synx_external_desc ext_sync_desc;
+};
+
+/**
+ * struct synx_addrefcount - info for refcount increment
+ *
+ * @synx_obj : Synx object
+ * @count    : Count to increment
+ *
+ */
+struct synx_addrefcount {
+	__s32 synx_obj;
+	__u32 count;
+};
+
+/**
+ * struct synx_id_info - info for import and export of a synx object
+ *
+ * @synx_obj     : Synx object to be exported
+ * @secure_key   : Secure key created in export and used in import
+ * @new_synx_obj : Synx object created in import
+ * @padding      : Padding
+ *
+ */
+struct synx_id_info {
+	__s32 synx_obj;
+	__u32 secure_key;
+	__s32 new_synx_obj;
+	__u32 padding;
+};
+
+/**
+ * struct synx_fence_desc - info of external fence object
+ *
+ * @type     : Fence type
+ * @reserved : Reserved
+ * @id       : Fence object id
+ *
+ */
+struct synx_fence_desc {
+	__u32 type;
+	__u32 reserved;
+	__s32 id[2];
+};
+
+/**
+ * struct synx_create_v2 - Sync object creation information
+ *
+ * @name     : Optional string representation of the synx object
+ * @synx_obj : Synx object allocated
+ * @flags    : Create flags
+ * @desc     : External fence desc
+ */
+struct synx_create_v2 {
+	char name[64];
+	__u32 synx_obj;
+	__u32 flags;
+	struct synx_fence_desc desc;
+};
+
+/**
+ * struct synx_userpayload_info_v2 - Payload info from user space
+ *
+ * @synx_obj  : Sync object for which the payload has to be registered
+ * @reserved  : Reserved
+ * @payload   : Pointer to user payload
+ */
+struct synx_userpayload_info_v2 {
+	__u32 synx_obj;
+	__u32 reserved;
+	__u64 payload[SYNX_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct synx_signal_v2 - Sync object signaling struct
+ *
+ * @synx_obj   : Sync object to be signaled
+ * @synx_state : State of the synx object to which it should be signaled
+ * @reserved   : Reserved
+ */
+struct synx_signal_v2 {
+	__u32 synx_obj;
+	__u32 synx_state;
+	__u64 reserved;
+};
+
+/**
+ * struct synx_merge_v2 - Merge information for synx objects
+ *
+ * @synx_objs :  Pointer to synx object array to merge
+ * @num_objs  :  Number of objects in the array
+ * @merged    :  Merged synx object
+ * @flags     :  Merge flags
+ * @reserved  :  Reserved
+ */
+struct synx_merge_v2 {
+	__u64 synx_objs;
+	__u32 num_objs;
+	__u32 merged;
+	__u32 flags;
+	__u32 reserved;
+};
+
+/**
+ * struct synx_wait_v2 - Sync object wait information
+ *
+ * @synx_obj   : Sync object to wait on
+ * @reserved   : Reserved
+ * @timeout_ms : Timeout in milliseconds
+ */
+struct synx_wait_v2 {
+	__u32 synx_obj;
+	__u32 reserved;
+	__u64 timeout_ms;
+};
+
+/**
+ * struct synx_external_desc_v2 - info of external sync object
+ *
+ * @type     : Synx type
+ * @reserved : Reserved
+ * @id       : Sync object id
+ *
+ */
+struct synx_external_desc_v2 {
+	__u64 id;
+	__u32 type;
+	__u32 reserved;
+};
+
+/**
+ * struct synx_bind_v2 - info for binding two synx objects
+ *
+ * @synx_obj      : Synx object
+ * @reserved      : Reserved
+ * @ext_sync_desc : External synx to bind to
+ *
+ */
+struct synx_bind_v2 {
+	__u32 synx_obj;
+	__u32 reserved;
+	struct synx_external_desc_v2 ext_sync_desc;
+};
+
+/**
+ * struct synx_import_info - import info
+ *
+ * @synx_obj     : Synx handle to be imported
+ * @flags        : Import flags
+ * @new_synx_obj : Synx object created in import
+ * @reserved     : Reserved
+ * @desc         : External fence descriptor
+ */
+struct synx_import_info {
+	__u32 synx_obj;
+	__u32 flags;
+	__u32 new_synx_obj;
+	__u32 reserved;
+	struct synx_fence_desc desc;
+};
+
+/**
+ * struct synx_import_arr_info - import list info
+ *
+ * @list     : List of synx_import_info
+ * @num_objs : No of fences to import
+ */
+struct synx_import_arr_info {
+	__u64 list;
+	__u32 num_objs;
+};
+
+/**
+ * struct synx_fence_fd - get fd for synx fence
+ *
+ * @synx_obj : Synx handle
+ * @fd       : fd for synx handle fence
+ */
+struct synx_fence_fd {
+	__u32 synx_obj;
+	__s32 fd;
+};
+
+/**
+ * struct synx_private_ioctl_arg - Sync driver ioctl argument
+ *
+ * @id        : IOCTL command id
+ * @size      : Size of command payload
+ * @result    : Result of command execution
+ * @reserved  : Reserved
+ * @ioctl_ptr : Pointer to user data
+ */
+struct synx_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__u64 ioctl_ptr;
+};
+
+#define SYNX_PRIVATE_MAGIC_NUM 's'
+
+#define SYNX_PRIVATE_IOCTL_CMD \
+	_IOWR(SYNX_PRIVATE_MAGIC_NUM, 130, struct synx_private_ioctl_arg)
+
+#define SYNX_CREATE                          0
+#define SYNX_RELEASE                         1
+#define SYNX_SIGNAL                          2
+#define SYNX_MERGE                           3
+#define SYNX_REGISTER_PAYLOAD                4
+#define SYNX_DEREGISTER_PAYLOAD              5
+#define SYNX_WAIT                            6
+#define SYNX_BIND                            7
+#define SYNX_ADDREFCOUNT                     8
+#define SYNX_GETSTATUS                       9
+#define SYNX_IMPORT                          10
+#define SYNX_EXPORT                          11
+#define SYNX_IMPORT_ARR                      12
+#define SYNX_GETFENCE_FD                     13
+
+#endif /* __UAPI_SYNX_H__ */
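
For reference, the sketch below shows how userspace might drive this UAPI: open the synx character device and issue SYNX_PRIVATE_IOCTL_CMD with a synx_create_v2 payload wrapped in synx_private_ioctl_arg. This is a minimal, hypothetical sketch, not part of this change; the /dev/synx node name and the assumption that the driver copies "size" bytes from "ioctl_ptr" and writes the new handle back into "synx_obj" are not defined by this diff.

/* Hypothetical userspace usage sketch -- not part of this commit. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "synx_header.h"        /* the UAPI header added above */

int main(void)
{
	struct synx_create_v2 create;
	struct synx_private_ioctl_arg arg;
	int fd, rc;

	fd = open("/dev/synx", O_RDWR);   /* device node name is an assumption */
	if (fd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	strncpy(create.name, "example-synx", sizeof(create.name) - 1);

	memset(&arg, 0, sizeof(arg));
	arg.id = SYNX_CREATE;
	arg.size = sizeof(create);
	arg.ioctl_ptr = (uint64_t)(uintptr_t)&create;

	rc = ioctl(fd, SYNX_PRIVATE_IOCTL_CMD, &arg);
	if (rc == 0)
		printf("created synx handle %u\n", create.synx_obj);  /* handle assumed to be written back by the driver */

	close(fd);
	return rc ? 1 : 0;
}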

+ 37 - 0
msm/Kbuild

@@ -0,0 +1,37 @@
+LINUXINCLUDE    += -I$(SYNX_ROOT)/include \
+                   -I$(SYNX_ROOT)/include/uapi \
+				   -I$(SYNX_ROOT)/include/uapi/synx/media
+
+ccflags-y += -I$(SYNX_ROOT)/msm/synx/
+
+# Add flag to compile the actual mmrm implementation instead of the stub version.
+# Follow up with the mmrm team on whether techpack users need to define this long term.
+#KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM
+
+# ported from Android.mk
+$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS))
+
+ifeq ($(CONFIG_ARCH_WAIPIO), y)
+$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO))
+# include $(SYNX_ROOT)/config/waipio.mk
+KBUILD_CPPFLAGS += -DCONFIG_SYNX_WAIPIO=1
+ccflags-y += -DCONFIG_SYNX_WAIPIO=1
+endif
+
+ifeq ($(CONFIG_ARCH_KALAMA), y)
+$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA))
+# include $(SYNX_ROOT)/config/waipio.mk
+KBUILD_CPPFLAGS += -DCONFIG_SYNX_KALAMA=1
+ccflags-y += -DCONFIG_SYNX_KALAMA=1
+endif
+
+ifeq ($(CONFIG_ARCH_PINEAPPLE), y)
+$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE))
+# include $(SYNX_ROOT)/config/pineapple.mk
+KBUILD_CPPFLAGS += -DCONFIG_SYNX_PINEAPPLE=1
+ccflags-y += -DCONFIG_SYNX_PINEAPPLE=1
+endif
+
+obj-m += synx-driver.o
+obj-m += synx/ipclite.o
+synx-driver-objs := synx/synx.o synx/synx_global.o synx/synx_util.o synx/synx_debugfs.o

+ 5 - 0
msm/Makefile

@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-m += synx/ipclite.o
+obj-m += synx-driver.o
+synx-driver-objs := synx/synx.o synx/synx_util.o synx/synx_debugfs.o synx/synx_global.o

+ 1030 - 0
msm/synx/ipclite.c

@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/memory.h>
+#include <linux/sizes.h>
+
+#include <linux/hwspinlock.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "ipclite_client.h"
+#include "ipclite.h"
+
+#define VMID_HLOS       3
+#define VMID_SSC_Q6     5
+#define VMID_ADSP_Q6    6
+#define VMID_CDSP       30
+#define GLOBAL_ATOMICS_ENABLED	1
+#define GLOBAL_ATOMICS_DISABLED	0
+
+static struct ipclite_info *ipclite;
+static struct ipclite_client synx_client;
+static struct ipclite_client test_client;
+struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
+
+u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
+
+#define FIFO_FULL_RESERVE 8
+#define FIFO_ALIGNMENT 8
+
+void ipclite_hwlock_reset(enum ipcmem_host_type core_id)
+{
+	/* verify and reset the hw mutex lock */
+	if (core_id == ipclite->ipcmem.toc->global_atomic_hwlock_owner) {
+		ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
+		hwspin_unlock_raw(ipclite->hwlock);
+	}
+}
+EXPORT_SYMBOL(ipclite_hwlock_reset);
+
+static void ipclite_hw_mutex_acquire(void)
+{
+	int32_t ret;
+
+	if (ipclite != NULL) {
+		if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
+			ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
+					HWSPINLOCK_TIMEOUT,
+					&ipclite->ipclite_hw_mutex->flags);
+			if (ret)
+				pr_err("Hw mutex lock acquire failed\n");
+
+			ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_APPS;
+
+			pr_debug("Hw mutex lock acquired\n");
+		}
+	}
+}
+
+static void ipclite_hw_mutex_release(void)
+{
+	if (ipclite != NULL) {
+		if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
+			ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
+			hwspin_unlock_irqrestore(ipclite->hwlock,
+				&ipclite->ipclite_hw_mutex->flags);
+			pr_debug("Hw mutex lock release\n");
+		}
+	}
+}
+
+void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
+{
+	atomic_set(addr, data);
+	pr_debug("%s new_val = %d\n", __func__, (*(uint32_t *)addr));
+}
+EXPORT_SYMBOL(ipclite_atomic_init_u32);
+
+void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data)
+{
+	atomic_set(addr, data);
+	pr_debug("%s new_val = %d\n", __func__, (*(int32_t *)addr));
+}
+EXPORT_SYMBOL(ipclite_atomic_init_i32);
+
+void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
+{
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	atomic_set(addr, data);
+	pr_debug("%s new_val = %d\n", __func__, (*(uint32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+}
+EXPORT_SYMBOL(ipclite_global_atomic_store_u32);
+
+void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data)
+{
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	atomic_set(addr, data);
+	pr_debug("%s new_val = %d\n", __func__, (*(int32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+}
+EXPORT_SYMBOL(ipclite_global_atomic_store_i32);
+
+uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr)
+{
+	uint32_t ret;
+
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	ret = atomic_read(addr);
+	pr_debug("%s ret = %d, new_val = %d\n", __func__,  ret, (*(uint32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_load_u32);
+
+int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr)
+{
+	int32_t ret;
+
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	ret = atomic_read(addr);
+	pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(int32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_load_i32);
+
+uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
+{
+	uint32_t ret;
+	uint32_t mask = (1 << nr);
+
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	ret = atomic_fetch_or(mask, addr);
+	pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_test_and_set_bit);
+
+uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
+{
+	uint32_t ret;
+	uint32_t mask = (1 << nr);
+
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	ret = atomic_fetch_and(~mask, addr);
+	pr_debug("%s ret = %d, new_val = %d\n", __func__, ret, (*(uint32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_test_and_clear_bit);
+
+int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr)
+{
+	int32_t ret = 0;
+
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	ret = atomic_fetch_add(1, addr);
+	pr_debug("%s ret = %d new_val = %d\n", __func__, ret, (*(int32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_inc);
+
+int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr)
+{
+	int32_t ret = 0;
+
+	/* callback to acquire hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->acquire();
+
+	ret = atomic_fetch_sub(1, addr);
+	pr_debug("%s ret = %d new_val = %d\n", __func__, ret, (*(int32_t *)addr));
+
+	/* callback to release hw mutex lock if atomic support is not enabled */
+	ipclite->ipclite_hw_mutex->release();
+
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_global_atomic_dec);
+
+static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo)
+{
+	size_t len;
+	u32 head;
+	u32 tail;
+
+	head = le32_to_cpu(*rx_fifo->head);
+	tail = le32_to_cpu(*rx_fifo->tail);
+	pr_debug("head=%d, tail=%d\n", head, tail);
+	if (head < tail)
+		len = rx_fifo->length - tail + head;
+	else
+		len = head - tail;
+
+	if (WARN_ON_ONCE(len > rx_fifo->length))
+		len = 0;
+	pr_debug("len=%d\n", len);
+	return len;
+}
+
+static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo,
+			       void *data, size_t count)
+{
+	size_t len;
+	u32 tail;
+
+	tail = le32_to_cpu(*rx_fifo->tail);
+
+	if (WARN_ON_ONCE(tail > rx_fifo->length))
+		return;
+
+	if (tail >= rx_fifo->length)
+		tail -= rx_fifo->length;
+
+	len = min_t(size_t, count, rx_fifo->length - tail);
+	if (len)
+		memcpy_fromio(data, rx_fifo->fifo + tail, len);
+
+	if (len != count)
+		memcpy_fromio(data + len, rx_fifo->fifo, (count - len));
+}
+
+static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo,
+				  size_t count)
+{
+	u32 tail;
+
+	tail = le32_to_cpu(*rx_fifo->tail);
+
+	tail += count;
+	if (tail >= rx_fifo->length)
+		tail %= rx_fifo->length;
+
+	*rx_fifo->tail = cpu_to_le32(tail);
+}
+
+static size_t ipcmem_tx_avail(struct ipclite_fifo *tx_fifo)
+{
+	u32 head;
+	u32 tail;
+	u32 avail;
+
+	head = le32_to_cpu(*tx_fifo->head);
+	tail = le32_to_cpu(*tx_fifo->tail);
+
+	if (tail <= head)
+		avail = tx_fifo->length - head + tail;
+	else
+		avail = tail - head;
+
+	if (avail < FIFO_FULL_RESERVE)
+		avail = 0;
+	else
+		avail -= FIFO_FULL_RESERVE;
+
+	if (WARN_ON_ONCE(avail > tx_fifo->length))
+		avail = 0;
+
+	return avail;
+}
+
+static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo,
+					    unsigned int head,
+					    const void *data, size_t count)
+{
+	size_t len;
+
+	if (WARN_ON_ONCE(head > tx_fifo->length))
+		return head;
+
+	len = min_t(size_t, count, tx_fifo->length - head);
+	if (len)
+		memcpy(tx_fifo->fifo + head, data, len);
+
+	if (len != count)
+		memcpy(tx_fifo->fifo, data + len, count - len);
+
+	head += count;
+	if (head >= tx_fifo->length)
+		head -= tx_fifo->length;
+
+	return head;
+}
+
+static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo,
+				const void *data, size_t dlen)
+{
+	unsigned int head;
+
+	head = le32_to_cpu(*tx_fifo->head);
+	head = ipcmem_tx_write_one(tx_fifo, head, data, dlen);
+
+	head = ALIGN(head, 8);
+	if (head >= tx_fifo->length)
+		head -= tx_fifo->length;
+
+	/* Ensure ordering of fifo and head update */
+	wmb();
+
+	*tx_fifo->head = cpu_to_le32(head);
+	pr_debug("head = %d\n", *tx_fifo->head);
+}
+
+static size_t ipclite_rx_avail(struct ipclite_channel *channel)
+{
+	return channel->rx_fifo->avail(channel->rx_fifo);
+}
+
+static void ipclite_rx_peak(struct ipclite_channel *channel,
+			       void *data, size_t count)
+{
+	channel->rx_fifo->peak(channel->rx_fifo, data, count);
+}
+
+static void ipclite_rx_advance(struct ipclite_channel *channel,
+					size_t count)
+{
+	channel->rx_fifo->advance(channel->rx_fifo, count);
+}
+
+static size_t ipclite_tx_avail(struct ipclite_channel *channel)
+{
+	return channel->tx_fifo->avail(channel->tx_fifo);
+}
+
+static void ipclite_tx_write(struct ipclite_channel *channel,
+				const void *data, size_t dlen)
+{
+	channel->tx_fifo->write(channel->tx_fifo, data, dlen);
+}
+
+static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail)
+{
+	uint64_t data;
+	int ret = 0;
+
+	if (avail < sizeof(data)) {
+		pr_err("Not enough data in fifo\n");
+		return -EAGAIN;
+	}
+
+	ipclite_rx_peak(channel, &data, sizeof(data));
+
+	if (synx_client.reg_complete == 1) {
+		if (synx_client.callback)
+			synx_client.callback(channel->remote_pid, data,
+								synx_client.priv_data);
+	}
+	ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
+	return ret;
+}
+
+static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail)
+{
+	uint64_t data;
+	int ret = 0;
+
+	if (avail < sizeof(data)) {
+		pr_err("Not enough data in fifo\n");
+		return -EAGAIN;
+	}
+
+	ipclite_rx_peak(channel, &data, sizeof(data));
+
+	if (test_client.reg_complete == 1) {
+		if (test_client.callback)
+			test_client.callback(channel->remote_pid, data,
+								test_client.priv_data);
+	}
+	ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
+	return ret;
+}
+
+static irqreturn_t ipclite_intr(int irq, void *data)
+{
+	struct ipclite_channel *channel;
+	struct ipclite_irq_info *irq_info;
+	unsigned int avail = 0;
+	int ret = 0;
+	uint64_t msg;
+
+	pr_debug("Interrupt received\n");
+	irq_info = (struct ipclite_irq_info *)data;
+	channel = container_of(irq_info, struct ipclite_channel, irq_info[irq_info->signal_id]);
+
+	if (irq_info->signal_id == IPCLITE_MSG_SIGNAL) {
+		for (;;) {
+			avail = ipclite_rx_avail(channel);
+			if (avail < sizeof(msg))
+				break;
+
+			ret = ipclite_rx_data(channel, avail);
+		}
+		pr_debug("checking messages in rx_fifo done\n");
+	} else if (irq_info->signal_id == IPCLITE_VERSION_SIGNAL) {
+		/* check_version_compatibility();*/
+		pr_debug("version matching sequence completed\n");
+	} else if (irq_info->signal_id == IPCLITE_TEST_SIGNAL) {
+		for (;;) {
+			avail = ipclite_rx_avail(channel);
+			if (avail < sizeof(msg))
+				break;
+
+			ret = ipclite_rx_test_data(channel, avail);
+		}
+		pr_debug("checking messages in rx_fifo done\n");
+	} else {
+		pr_err("wrong interrupt signal received, signal_id =%d\n", irq_info->signal_id);
+	}
+	return IRQ_HANDLED;
+}
+
+static int ipclite_tx(struct ipclite_channel *channel,
+			uint64_t data, size_t dlen, uint32_t ipclite_signal)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&channel->tx_lock, flags);
+	if (ipclite_tx_avail(channel) < dlen) {
+		spin_unlock_irqrestore(&channel->tx_lock, flags);
+		ret = -EAGAIN;
+		return ret;
+	}
+
+	ipclite_tx_write(channel, &data, dlen);
+
+	mbox_send_message(channel->irq_info[ipclite_signal].mbox_chan, NULL);
+	mbox_client_txdone(channel->irq_info[ipclite_signal].mbox_chan, 0);
+
+	spin_unlock_irqrestore(&channel->tx_lock, flags);
+
+	return ret;
+}
+
+int ipclite_msg_send(int32_t proc_id, uint64_t data)
+{
+	int ret = 0;
+
+	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
+		pr_err("Invalid proc_id %d\n", proc_id);
+		return -EINVAL;
+	}
+
+	if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) {
+		pr_err("Cannot send msg to remote client. Channel inactive\n");
+		return -ENXIO;
+	}
+
+	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
+								IPCLITE_MSG_SIGNAL);
+	pr_debug("Message send completed with ret=%d\n", ret);
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_msg_send);
+
+int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv)
+{
+	if (!cb_func_ptr) {
+		pr_err("Invalid callback pointer\n");
+		return -EINVAL;
+	}
+	synx_client.callback = cb_func_ptr;
+	synx_client.priv_data = priv;
+	synx_client.reg_complete = 1;
+	pr_debug("Client Registration completed\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipclite_register_client);
+
+int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
+{
+	int ret = 0;
+
+	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
+		pr_err("Invalid proc_id %d\n", proc_id);
+		return -EINVAL;
+	}
+
+	/* Limit Message Sending without Client Registration */
+	if (ipclite->channel[proc_id].channel_status != ACTIVE_CHANNEL) {
+		pr_err("Cannot send msg to remote client. Channel inactive\n");
+		return -ENXIO;
+	}
+
+	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
+									IPCLITE_TEST_SIGNAL);
+	pr_debug("Message send completed with ret=%d\n", ret);
+	return ret;
+}
+EXPORT_SYMBOL(ipclite_test_msg_send);
+
+int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv)
+{
+	if (!cb_func_ptr) {
+		pr_err("Invalid callback pointer\n");
+		return -EINVAL;
+	}
+	test_client.callback = cb_func_ptr;
+	test_client.priv_data = priv;
+	test_client.reg_complete = 1;
+	pr_debug("Test Client Registration Completed\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipclite_register_test_client);
+
+static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
+{
+	struct device *dev;
+	struct device_node *np;
+	struct resource r;
+	int ret = 0;
+
+	dev = ipclite->dev;
+
+	np = of_parse_phandle(dev->of_node, name, 0);
+	if (!np) {
+		pr_err("No %s specified\n", name);
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	ipclite->ipcmem.mem.aux_base = (u64)r.start;
+	ipclite->ipcmem.mem.size = resource_size(&r);
+	ipclite->ipcmem.mem.virt_base = devm_ioremap_wc(dev, r.start,
+					resource_size(&r));
+	if (!ipclite->ipcmem.mem.virt_base)
+		return -ENOMEM;
+
+	pr_debug("aux_base = %lx, size=%d,virt_base=%p\n",
+			ipclite->ipcmem.mem.aux_base, ipclite->ipcmem.mem.size,
+			ipclite->ipcmem.mem.virt_base);
+
+	return ret;
+}
+
+static void ipcmem_init(struct ipclite_mem *ipcmem)
+{
+	int host0, host1;
+	int i = 0;
+
+	ipcmem->toc = ipcmem->mem.virt_base;
+	pr_debug("toc_base = %p\n", ipcmem->toc);
+
+	ipcmem->toc->hdr.size = IPCMEM_TOC_SIZE;
+	pr_debug("toc->hdr.size = %d\n", ipcmem->toc->hdr.size);
+
+	/*Fill in global partition details*/
+	ipcmem->toc->toc_entry_global = ipcmem_toc_global_partition_entry;
+	ipcmem->global_partition = (struct ipcmem_global_partition *)
+								((char *)ipcmem->mem.virt_base +
+						ipcmem_toc_global_partition_entry.base_offset);
+
+	pr_debug("base_offset =%x,ipcmem->global_partition = %p\n",
+				ipcmem_toc_global_partition_entry.base_offset,
+				ipcmem->global_partition);
+
+	ipcmem->global_partition->hdr = global_partition_hdr;
+
+	pr_debug("hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
+				ipcmem->global_partition->hdr.partition_type,
+				ipcmem->global_partition->hdr.region_offset,
+				ipcmem->global_partition->hdr.region_size);
+
+	/* Fill in each IPCMEM TOC entry from ipcmem_toc_partition_entries config*/
+	for (i = 0; i < MAX_PARTITION_COUNT; i++) {
+		host0 = ipcmem_toc_partition_entries[i].host0;
+		host1 = ipcmem_toc_partition_entries[i].host1;
+		pr_debug("host0 = %d, host1=%d\n", host0, host1);
+
+		ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i];
+		ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i];
+
+		ipcmem->partition[i] = (struct ipcmem_partition *)
+								((char *)ipcmem->mem.virt_base +
+						ipcmem_toc_partition_entries[i].base_offset);
+
+		pr_debug("partition[%d] = %p,partition_base_offset[%d]=%lx\n",
+					i, ipcmem->partition[i],
+					i, ipcmem_toc_partition_entries[i].base_offset);
+
+		if (host0 == host1)
+			ipcmem->partition[i]->hdr = loopback_partition_hdr;
+		else
+			ipcmem->partition[i]->hdr = default_partition_hdr;
+
+		pr_debug("hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
+					ipcmem->partition[i]->hdr.type,
+					ipcmem->partition[i]->hdr.desc_offset,
+					ipcmem->partition[i]->hdr.desc_size);
+	}
+
+	/*Making sure all writes for ipcmem configurations are completed*/
+	wmb();
+
+	ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED;
+	pr_debug("Ipcmem init completed\n");
+}
+
+
+/*Add VMIDs corresponding to EVA, CDSP and VPU to set IPCMEM access control*/
+static int set_ipcmem_access_control(struct ipclite_info *ipclite)
+{
+	int ret = 0;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[2] = {VMID_HLOS, VMID_CDSP};
+	int destVMperm[2] = {PERM_READ | PERM_WRITE,
+				PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(ipclite->ipcmem.mem.aux_base,
+				ipclite->ipcmem.mem.size, srcVM, 1,
+				destVM, destVMperm, 2);
+	return ret;
+}
+
+static int ipclite_channel_irq_init(struct device *parent, struct device_node *node,
+								struct ipclite_channel *channel)
+{
+	int ret = 0;
+	u32 index;
+	char strs[4][9] = {"msg", "mem-init", "version", "test"};
+	struct ipclite_irq_info *irq_info;
+	struct device *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->parent = parent;
+	dev->of_node = node;
+	dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
+	pr_debug("Registering %s device\n", dev_name(parent->parent));
+	ret = device_register(dev);
+	if (ret) {
+		pr_err("failed to register ipclite child node\n");
+		put_device(dev);
+		return ret;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "index",
+				   &index);
+	if (ret) {
+		pr_err("failed to parse index\n");
+		goto err_dev;
+	}
+
+	irq_info = &channel->irq_info[index];
+	pr_debug("irq_info[%d]=%p\n", index, irq_info);
+
+	irq_info->mbox_client.dev = dev;
+	irq_info->mbox_client.knows_txdone = true;
+	irq_info->mbox_chan = mbox_request_channel(&irq_info->mbox_client, 0);
+	pr_debug("irq_info[%d].mbox_chan=%p\n", index, irq_info->mbox_chan);
+	if (IS_ERR(irq_info->mbox_chan)) {
+		if (PTR_ERR(irq_info->mbox_chan) != -EPROBE_DEFER)
+			pr_err("failed to acquire IPC channel\n");
+		goto err_dev;
+	}
+
+	snprintf(irq_info->irqname, 32, "ipclite-signal-%s", strs[index]);
+	irq_info->irq = of_irq_get(dev->of_node, 0);
+	pr_debug("irq[%d] = %d\n", index, irq_info->irq);
+	irq_info->signal_id = index;
+	ret = devm_request_irq(dev, irq_info->irq,
+			       ipclite_intr,
+			       IRQF_NO_SUSPEND | IRQF_SHARED,
+			       irq_info->irqname, irq_info);
+	if (ret) {
+		pr_err("failed to request IRQ\n");
+		goto err_dev;
+	}
+	pr_debug("Interrupt init completed, ret = %d\n", ret);
+	return 0;
+
+err_dev:
+	device_unregister(dev);
+	kfree(dev);
+	return ret;
+}
+
+int32_t get_global_partition_info(struct global_region_info *global_ipcmem)
+{
+	struct ipcmem_global_partition *global_partition;
+
+	if (!global_ipcmem)
+		return -EINVAL;
+
+	global_partition = ipclite->ipcmem.global_partition;
+	global_ipcmem->virt_base = (void *)((char *)global_partition +
+							global_partition->hdr.region_offset);
+	global_ipcmem->size = (size_t)(global_partition->hdr.region_size);
+
+	pr_debug("base = %p, size=%lx\n", global_ipcmem->virt_base,
+									global_ipcmem->size);
+	return 0;
+}
+EXPORT_SYMBOL(get_global_partition_info);
+
+static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid,
+								int remote_pid)
+{
+	return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base +
+				ipcmem.toc->toc_entry[local_pid][remote_pid].base_offset);
+}
+
+static void ipclite_channel_release(struct device *dev)
+{
+	pr_info("Releasing ipclite channel\n");
+	kfree(dev);
+}
+
+/* Sets up following fields of IPCLite channel structure:
+ *	remote_pid,tx_fifo, rx_fifo
+ */
+static int ipclite_channel_init(struct device *parent,
+								struct device_node *node)
+{
+	struct ipclite_fifo *rx_fifo;
+	struct ipclite_fifo *tx_fifo;
+
+	struct device *dev;
+	u32 local_pid, remote_pid, global_atomic;
+	u32 *descs;
+	int ret = 0;
+
+	struct device_node *child;
+
+	struct ipcmem_partition_header *partition_hdr;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->parent = parent;
+	dev->of_node = node;
+	dev->release = ipclite_channel_release;
+	dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
+	pr_debug("Registering %s device\n", dev_name(parent->parent));
+	ret = device_register(dev);
+	if (ret) {
+		pr_err("failed to register ipclite device\n");
+		put_device(dev);
+		kfree(dev);
+		return ret;
+	}
+
+	local_pid = LOCAL_HOST;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
+				   &remote_pid);
+	if (ret) {
+		dev_err(dev, "failed to parse qcom,remote-pid\n");
+		goto err_put_dev;
+	}
+	pr_debug("remote_pid = %d, local_pid=%d\n", remote_pid, local_pid);
+
+	ipclite_hw_mutex = devm_kzalloc(dev, sizeof(*ipclite_hw_mutex), GFP_KERNEL);
+	if (!ipclite_hw_mutex) {
+		ret = -ENOMEM;
+		goto err_put_dev;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "global_atomic", &global_atomic);
+	if (ret) {
+		dev_err(dev, "failed to parse global_atomic\n");
+		goto err_put_dev;
+	}
+	if (global_atomic == 0)
+		global_atomic_support = GLOBAL_ATOMICS_DISABLED;
+
+	rx_fifo = devm_kzalloc(dev, sizeof(*rx_fifo), GFP_KERNEL);
+	tx_fifo = devm_kzalloc(dev, sizeof(*tx_fifo), GFP_KERNEL);
+	if (!rx_fifo || !tx_fifo) {
+		ret = -ENOMEM;
+		goto err_put_dev;
+	}
+	pr_debug("rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo);
+
+	partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem,
+						local_pid, remote_pid);
+	pr_debug("partition_hdr = %p\n", partition_hdr);
+	descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset);
+	pr_debug("descs = %p\n", descs);
+
+	if (local_pid < remote_pid) {
+		tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
+		tx_fifo->length = partition_hdr->fifo0_size;
+		rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
+		rx_fifo->length = partition_hdr->fifo1_size;
+
+		tx_fifo->tail = &descs[0];
+		tx_fifo->head = &descs[1];
+		rx_fifo->tail = &descs[2];
+		rx_fifo->head = &descs[3];
+
+	} else {
+		tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
+		tx_fifo->length = partition_hdr->fifo1_size;
+		rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
+		rx_fifo->length = partition_hdr->fifo0_size;
+
+		rx_fifo->tail = &descs[0];
+		rx_fifo->head = &descs[1];
+		tx_fifo->tail = &descs[2];
+		tx_fifo->head = &descs[3];
+	}
+
+	if (partition_hdr->type == LOOPBACK_PARTITION_TYPE) {
+		rx_fifo->tail = tx_fifo->tail;
+		rx_fifo->head = tx_fifo->head;
+	}
+
+	/* rx_fifo->reset = ipcmem_rx_reset;*/
+	rx_fifo->avail = ipcmem_rx_avail;
+	rx_fifo->peak = ipcmem_rx_peak;
+	rx_fifo->advance = ipcmem_rx_advance;
+
+	/* tx_fifo->reset = ipcmem_tx_reset;*/
+	tx_fifo->avail = ipcmem_tx_avail;
+	tx_fifo->write = ipcmem_tx_write;
+
+	*rx_fifo->tail = 0;
+	*tx_fifo->head = 0;
+
+	/*Store Channel Information*/
+	ipclite->channel[remote_pid].remote_pid = remote_pid;
+	ipclite->channel[remote_pid].tx_fifo = tx_fifo;
+	ipclite->channel[remote_pid].rx_fifo = rx_fifo;
+
+	spin_lock_init(&ipclite->channel[remote_pid].tx_lock);
+
+	for_each_available_child_of_node(dev->of_node, child) {
+		ret = ipclite_channel_irq_init(dev, child,
+				&ipclite->channel[remote_pid]);
+		if (ret) {
+			pr_err("irq setup for ipclite channel failed\n");
+			goto err_put_dev;
+		}
+	}
+	ipclite->channel[remote_pid].channel_status = ACTIVE_CHANNEL;
+	pr_debug("Channel init completed, ret = %d\n", ret);
+	return ret;
+
+err_put_dev:
+	ipclite->channel[remote_pid].channel_status = 0;
+	device_unregister(dev);
+	kfree(dev);
+	return ret;
+}
+
+static void probe_subsystem(struct device *dev, struct device_node *np)
+{
+	int ret = 0;
+
+	ret = ipclite_channel_init(dev, np);
+	if (ret)
+		pr_err("IPCLite Channel init failed\n");
+}
+
+static int ipclite_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int hwlock_id;
+	struct ipcmem_region *mem;
+	struct device_node *cn;
+	struct device_node *pn = pdev->dev.of_node;
+	struct ipclite_channel broadcast;
+
+	ipclite = kzalloc(sizeof(*ipclite), GFP_KERNEL);
+	if (!ipclite) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ipclite->dev = &pdev->dev;
+
+	hwlock_id = of_hwspin_lock_get_id(pn, 0);
+	if (hwlock_id < 0) {
+		if (hwlock_id != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+		ret = hwlock_id;
+		goto error;
+	}
+	pr_debug("Hwlock id retrieved, hwlock_id=%d\n", hwlock_id);
+
+	ipclite->hwlock = hwspin_lock_request_specific(hwlock_id);
+	if (!ipclite->hwlock) {
+		pr_err("Failed to assign hwlock_id\n");
+		ret = -ENXIO;
+		goto error;
+	}
+	pr_debug("Hwlock id assigned successfully, hwlock=%p\n", ipclite->hwlock);
+
+	ret = map_ipcmem(ipclite, "memory-region");
+	if (ret) {
+		pr_err("failed to map ipcmem\n");
+		goto release;
+	}
+	mem = &(ipclite->ipcmem.mem);
+	memset(mem->virt_base, 0, mem->size);
+
+	ret = set_ipcmem_access_control(ipclite);
+	if (ret) {
+		pr_err("failed to set access control policy\n");
+		goto release;
+	}
+
+	ipcmem_init(&ipclite->ipcmem);
+
+	/* Setup Channel for each Remote Subsystem */
+	for_each_available_child_of_node(pn, cn)
+		probe_subsystem(&pdev->dev, cn);
+	/* Broadcast init_done signal to all subsystems once mbox channels
+	 * are set up
+	 */
+	broadcast = ipclite->channel[IPCMEM_APPS];
+	ret = mbox_send_message(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan,
+								 NULL);
+	if (ret < 0)
+		goto mem_release;
+
+	mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0);
+
+	if (global_atomic_support) {
+		ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
+							GLOBAL_ATOMICS_ENABLED;
+	} else {
+		ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
+							GLOBAL_ATOMICS_DISABLED;
+	}
+
+	pr_debug("global_atomic_support : %d\n",
+		ipclite->ipcmem.toc->ipclite_features.global_atomic_support);
+
+	/* hw mutex callbacks */
+	ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire;
+	ipclite_hw_mutex->release = ipclite_hw_mutex_release;
+
+	/* store to ipclite structure */
+	ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
+
+	/* initialize hwlock owner to invalid host */
+	ipclite->ipcmem.toc->global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
+
+	pr_info("IPCLite probe completed successfully\n");
+	return ret;
+
+mem_release:
+	/* If the remote subsystem has already completed its init and is actively
+	 * using IPCMEM, re-assigning the IPCMEM memory back to HLOS can lead to a crash.
+	 * Solution: either do not take back the memory, or make sure APPS completes
+	 * init before any other subsystem initializes IPCLite (then we would not
+	 * have to send the broadcast).
+	 */
+release:
+	kfree(ipclite);
+error:
+	pr_err("IPCLite probe failed\n");
+	return ret;
+}
+
+static const struct of_device_id ipclite_of_match[] = {
+	{ .compatible = "qcom,ipclite"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, ipclite_of_match);
+
+static struct platform_driver ipclite_driver = {
+	.probe = ipclite_probe,
+	.driver = {
+		.name = "ipclite",
+		.of_match_table = ipclite_of_match,
+	},
+};
+
+module_platform_driver(ipclite_driver);
+
+MODULE_DESCRIPTION("IPCLite Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: qcom_hwspinlock");

+ 321 - 0
msm/synx/ipclite.h

@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/hwspinlock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/soc/qcom,ipcc.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include "ipclite_client.h"
+
+#define IPCMEM_INIT_COMPLETED	0x1
+#define ACTIVE_CHANNEL			0x1
+
+#define IPCMEM_TOC_SIZE			(4*1024)
+#define MAX_CHANNEL_SIGNALS		4
+
+#define MAX_PARTITION_COUNT		7	/*7 partitions other than global partition*/
+
+#define IPCLITE_MSG_SIGNAL		0
+#define IPCLITE_MEM_INIT_SIGNAL 1
+#define IPCLITE_VERSION_SIGNAL  2
+#define IPCLITE_TEST_SIGNAL		3
+
+/** Flag definitions for the entries */
+#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION   (0x01)
+#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION  (0x02)
+#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION \
+		(IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION | \
+		IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION)
+
+#define IPCMEM_TOC_ENTRY_FLAGS_IGNORE_PARTITION         (0x00000004)
+
+/*Hardcoded macro to identify local host on each core*/
+#define LOCAL_HOST		IPCMEM_APPS
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT	1000
+
+/*IPCMEM Structure Definitions*/
+
+struct ipclite_features {
+	uint32_t global_atomic_support;
+	uint32_t version_finalised;
+};
+
+struct ipcmem_partition_header {
+	uint32_t type;			   /*partition type*/
+	uint32_t desc_offset;      /*descriptor offset*/
+	uint32_t desc_size;        /*descriptor size*/
+	uint32_t fifo0_offset;     /*fifo 0 offset*/
+	uint32_t fifo0_size;       /*fifo 0 size*/
+	uint32_t fifo1_offset;     /*fifo 1 offset*/
+	uint32_t fifo1_size;       /*fifo 1 size*/
+};
+
+struct ipcmem_toc_entry {
+	uint32_t base_offset;	/*partition offset from IPCMEM base*/
+	uint32_t size;			/*partition size*/
+	uint32_t flags;			/*partition flags if required*/
+	uint32_t host0;			/*subsystem 0 who can access this partition*/
+	uint32_t host1;			/*subsystem 1 who can access this partition*/
+	uint32_t status;		/*partition active status*/
+};
+
+struct ipcmem_toc_header {
+	uint32_t size;
+	uint32_t init_done;
+};
+
+struct ipcmem_toc {
+	struct ipcmem_toc_header hdr;
+	struct ipcmem_toc_entry toc_entry_global;
+	struct ipcmem_toc_entry toc_entry[IPCMEM_NUM_HOSTS][IPCMEM_NUM_HOSTS];
+	/* Need a better implementation here: ipcmem is 4k, so
+	 * increasing the number of hosts would create problems.
+	 */
+	struct ipclite_features ipclite_features;
+	uint32_t global_atomic_hwlock_owner;
+};
+
+struct ipcmem_region {
+	u64 aux_base;
+	void __iomem *virt_base;
+	uint32_t size;
+};
+
+struct ipcmem_partition {
+	struct ipcmem_partition_header hdr;
+};
+
+struct global_partition_header {
+	uint32_t partition_type;
+	uint32_t region_offset;
+	uint32_t region_size;
+};
+
+struct ipcmem_global_partition {
+	struct global_partition_header hdr;
+};
+
+struct ipclite_mem {
+	struct ipcmem_toc *toc;
+	struct ipcmem_region mem;
+	struct ipcmem_global_partition *global_partition;
+	struct ipcmem_partition *partition[MAX_PARTITION_COUNT];
+};
+
+struct ipclite_fifo {
+	uint32_t length;
+
+	__le32 *tail;
+	__le32 *head;
+
+	void *fifo;
+
+	size_t (*avail)(struct ipclite_fifo *fifo);
+
+	void (*peak)(struct ipclite_fifo *fifo,
+			       void *data, size_t count);
+
+	void (*advance)(struct ipclite_fifo *fifo,
+				  size_t count);
+
+	void (*write)(struct ipclite_fifo *fifo,
+				const void *data, size_t dlen);
+
+	void (*reset)(struct ipclite_fifo *fifo);
+};
+
+struct ipclite_hw_mutex_ops {
+	unsigned long flags;
+	void (*acquire)(void);
+	void (*release)(void);
+};
+
+struct ipclite_irq_info {
+	struct mbox_client mbox_client;
+	struct mbox_chan *mbox_chan;
+	int irq;
+	int signal_id;
+	char irqname[32];
+};
+
+struct ipclite_client {
+	IPCLite_Client callback;
+	void *priv_data;
+	int reg_complete;
+};
+
+struct ipclite_channel {
+	uint32_t remote_pid;
+
+	struct ipclite_fifo *tx_fifo;
+	struct ipclite_fifo *rx_fifo;
+	spinlock_t tx_lock;
+
+	struct ipclite_irq_info irq_info[MAX_CHANNEL_SIGNALS];
+
+	struct ipclite_client client;
+
+	uint32_t channel_version;
+	uint32_t version_finalised;
+
+	uint32_t channel_status;
+};
+
+/*Single structure that defines everything about IPCLite*/
+struct ipclite_info {
+	struct device *dev;
+	struct ipclite_channel channel[IPCMEM_NUM_HOSTS];
+	struct ipclite_mem ipcmem;
+	struct hwspinlock *hwlock;
+	struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
+};
+
+const struct ipcmem_toc_entry ipcmem_toc_global_partition_entry = {
+	/* Global partition. */
+	  4 * 1024,
+	  128 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_GLOBAL_HOST,
+	  IPCMEM_GLOBAL_HOST,
+};
+
+const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
+	/* Global partition. */
+	/* {
+	 *   4 * 1024,
+	 *   128 * 1024,
+	 *   IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	 *   IPCMEM_GLOBAL_HOST,
+	 *   IPCMEM_GLOBAL_HOST,
+	 * },
+	 */
+
+	/* Apps<->CDSP partition. */
+	{
+	  132 * 1024,
+	  32 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_APPS,
+	  IPCMEM_CDSP,
+	  1,
+	},
+	/* APPS<->CVP (EVA) partition. */
+	{
+	  164 * 1024,
+	  32 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_APPS,
+	  IPCMEM_CVP,
+	  1,
+	},
+	/* APPS<->VPU partition. */
+	{
+	  196 * 1024,
+	  32 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_APPS,
+	  IPCMEM_VPU,
+	  1,
+	},
+	/* CDSP<->CVP (EVA) partition. */
+	{
+	  228 * 1024,
+	  32 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_CDSP,
+	  IPCMEM_CVP,
+	  1,
+	},
+	/* CDSP<->VPU partition. */
+	{
+	  260 * 1024,
+	  32 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_CDSP,
+	  IPCMEM_VPU,
+	  1,
+	},
+	/* VPU<->CVP (EVA) partition. */
+	{
+	  292 * 1024,
+	  32 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_VPU,
+	  IPCMEM_CVP,
+	  1,
+	},
+	/* APPS<->APPS partition. */
+	{
+	  326 * 1024,
+	  32 * 1024,
+	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	  IPCMEM_APPS,
+	  IPCMEM_APPS,
+	  1,
+	}
+	/* Last entry uses invalid hosts and no protections to signify the end. */
+	/* {
+	 *   0,
+	 *   0,
+	 *   IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
+	 *   IPCMEM_INVALID_HOST,
+	 *   IPCMEM_INVALID_HOST,
+	 * }
+	 */
+};
+
+/*Default partition parameters*/
+#define	DEFAULT_PARTITION_TYPE			0x0
+#define	DEFAULT_PARTITION_HDR_SIZE		1024
+
+#define	DEFAULT_DESCRIPTOR_OFFSET		1024
+#define	DEFAULT_DESCRIPTOR_SIZE			(3*1024)
+#define DEFAULT_FIFO0_OFFSET			(4*1024)
+#define DEFAULT_FIFO0_SIZE				(8*1024)
+#define DEFAULT_FIFO1_OFFSET			(12*1024)
+#define DEFAULT_FIFO1_SIZE				(8*1024)
+
+/*Loopback partition parameters*/
+#define	LOOPBACK_PARTITION_TYPE			0x1
+
+/*Global partition parameters*/
+#define	GLOBAL_PARTITION_TYPE			0xFF
+#define GLOBAL_PARTITION_HDR_SIZE		(4*1024)
+
+#define GLOBAL_REGION_OFFSET			(4*1024)
+#define GLOBAL_REGION_SIZE				(124*1024)
+
+
+const struct ipcmem_partition_header default_partition_hdr = {
+	DEFAULT_PARTITION_TYPE,
+	DEFAULT_DESCRIPTOR_OFFSET,
+	DEFAULT_DESCRIPTOR_SIZE,
+	DEFAULT_FIFO0_OFFSET,
+	DEFAULT_FIFO0_SIZE,
+	DEFAULT_FIFO1_OFFSET,
+	DEFAULT_FIFO1_SIZE,
+};
+
+/* TX and RX FIFO point to same location for such loopback partition type
+ * (FIFO0 offset = FIFO1 offset)
+ */
+const struct ipcmem_partition_header loopback_partition_hdr = {
+	LOOPBACK_PARTITION_TYPE,
+	DEFAULT_DESCRIPTOR_OFFSET,
+	DEFAULT_DESCRIPTOR_SIZE,
+	DEFAULT_FIFO0_OFFSET,
+	DEFAULT_FIFO0_SIZE,
+	DEFAULT_FIFO0_OFFSET,
+	DEFAULT_FIFO0_SIZE,
+};
+
+const struct global_partition_header global_partition_hdr = {
+	GLOBAL_PARTITION_TYPE,
+	GLOBAL_REGION_OFFSET,
+	GLOBAL_REGION_SIZE,
+};

+ 191 - 0
msm/synx/ipclite_client.h

@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __IPCLITE_CLIENT_H__
+#define __IPCLITE_CLIENT_H__
+
+typedef atomic_t ipclite_atomic_uint32_t;
+typedef atomic_t ipclite_atomic_int32_t;
+
+/**
+ * A list of hosts supported in IPCMEM
+ */
+enum ipcmem_host_type {
+	IPCMEM_APPS         =  0,                     /**< Apps Processor */
+	IPCMEM_MODEM        =  1,                     /**< Modem processor */
+	IPCMEM_LPASS        =  2,                     /**< Audio processor */
+	IPCMEM_SLPI         =  3,                     /**< Sensor processor */
+	IPCMEM_GPU          =  4,                     /**< Graphics processor */
+	IPCMEM_CDSP         =  5,                     /**< Compute DSP processor */
+	IPCMEM_CVP          =  6,                     /**< Computer Vision processor */
+	IPCMEM_CAM          =  7,                     /**< Camera processor */
+	IPCMEM_VPU          =  8,                     /**< Video processor */
+	IPCMEM_NUM_HOSTS    =  9,                     /**< Max number of host in target */
+
+	IPCMEM_GLOBAL_HOST  =  0xFE,                  /**< Global Host */
+	IPCMEM_INVALID_HOST =  0xFF,				  /**< Invalid processor */
+};
+
+struct global_region_info {
+	void *virt_base;
+	uint32_t size;
+};
+
+typedef int32_t (*IPCLite_Client)(uint32_t proc_id,  int64_t data,  void *priv);
+
+/**
+ * ipclite_msg_send() - Sends message to remote client.
+ *
+ * @proc_id  : Identifier for remote client or subsystem.
+ * @data       : 64 bit message value.
+ *
+ * @return Zero on successful send, negative on failure.
+ */
+int32_t ipclite_msg_send(int32_t proc_id, uint64_t data);
+
+/**
+ * ipclite_register_client() - Registers client callback with framework.
+ *
+ * @cb_func_ptr : Client callback function to be called on message receive.
+ * @priv        : Private data required by client for handling callback.
+ *
+ * @return Zero on successful registration, negative on failure.
+ */
+int32_t ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv);
+
+/**
+ * ipclite_test_msg_send() - Sends message to remote client.
+ *
+ * @proc_id  : Identifier for remote client or subsystem.
+ * @data       : 64 bit message value.
+ *
+ * @return Zero on successful send, negative on failure.
+ */
+int32_t ipclite_test_msg_send(int32_t proc_id, uint64_t data);
+
+/**
+ * ipclite_register_test_client() - Registers client callback with framework.
+ *
+ * @cb_func_ptr : Client callback function to be called on message receive.
+ * @priv        : Private data required by client for handling callback.
+ *
+ * @return Zero on successful registration, negative on failure.
+ */
+int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
+
+/**
+ * get_global_partition_info() - Gets info about IPCMEM's global partitions.
+ *
+ * @global_ipcmem : Pointer to global_region_info structure.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int32_t get_global_partition_info(struct global_region_info *global_ipcmem);
+
+/**
+ * ipclite_hwlock_reset() - Resets the lock if the lock is currently held by core_id
+ *
+ * @core_id	: Core id for which the lock needs to be reset.
+ *
+ * @return None.
+ */
+void ipclite_hwlock_reset(enum ipcmem_host_type core_id);
+
+/**
+ * ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
+
+/**
+ * ipclite_atomic_init_i32() - Initializes the global memory with int32_t value.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data);
+
+/**
+ * ipclite_global_atomic_store_u32() - Writes uint32_t value to global memory.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
+
+/**
+ * ipclite_global_atomic_store_i32() - Writes int32_t value to global memory.
+ *
+ * @addr	: Pointer to global memory
+ * @data	: Value to store in global memory
+ *
+ * @return None.
+ */
+void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data);
+
+/**
+ * ipclite_global_atomic_load_u32() - Reads the value from global memory.
+ *
+ * @addr	: Pointer to global memory
+ *
+ * @return uint32_t value.
+ */
+uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr);
+
+/**
+ * ipclite_global_atomic_load_i32() - Reads the value from global memory.
+ *
+ * @addr	: Pointer to global memory
+ *
+ * @return int32_t value.
+ */
+int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr);
+
+/**
+ * ipclite_global_test_and_set_bit() - Sets a bit in global memory.
+ *
+ * @nr		: Bit position to set.
+ * @addr	: Pointer to global memory
+ *
+ * @return previous value.
+ */
+uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
+
+/**
+ * ipclite_global_test_and_clear_bit() - Clears a bit in global memory.
+ *
+ * @nr		: Bit position to clear.
+ * @addr	: Pointer to global memory
+ *
+ * @return previous value.
+ */
+uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
+
+/**
+ * ipclite_global_atomic_inc() - Increments an atomic variable by one.
+ *
+ * @addr	: Pointer to global memory
+ *
+ * @return previous value.
+ */
+int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr);
+
+/**
+ * ipclite_global_atomic_dec() - Decrements an atomic variable by one.
+ *
+ * @addr	: Pointer to global variable
+ *
+ * @return previous value.
+ */
+int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr);
+
+#endif
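
For reference, the sketch below shows how an in-kernel client might use this API: register a receive callback with ipclite_register_client() and push a 64-bit message to the CDSP with ipclite_msg_send(). This is a hypothetical module, not part of this commit; note that ipclite.c above keeps a single client slot (synx_client) for ipclite_register_client(), so in practice the synx core is the intended caller, and the payload value here is illustrative only.

/* Hypothetical in-kernel usage sketch -- not part of this commit. */
#include <linux/module.h>
#include <linux/printk.h>

#include "ipclite_client.h"

/* Called by IPCLite when a 64-bit message arrives from a remote host. */
static int32_t example_ipclite_cb(uint32_t proc_id, int64_t data, void *priv)
{
	pr_info("ipclite example: msg 0x%llx from host %u\n",
		(unsigned long long)data, proc_id);
	return 0;
}

static int __init example_init(void)
{
	int32_t rc;

	rc = ipclite_register_client(example_ipclite_cb, NULL);
	if (rc)
		return rc;

	/* Send an arbitrary payload to the CDSP; returns -ENXIO if the channel is not ACTIVE. */
	return ipclite_msg_send(IPCMEM_CDSP, 0x1234ULL);
}

static void __exit example_exit(void)
{
	/* No deregistration API is exposed in this header. */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");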

+ 2636 - 0
msm/synx/synx.c

@@ -0,0 +1,2636 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/random.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "synx_debugfs.h"
+#include "synx_private.h"
+#include "synx_util.h"
+
+struct synx_device *synx_dev;
+static atomic64_t synx_counter = ATOMIC64_INIT(1);
+
+void synx_external_callback(s32 sync_obj, int status, void *data)
+{
+	struct synx_signal_cb *signal_cb = data;
+
+	if (IS_ERR_OR_NULL(signal_cb)) {
+		dprintk(SYNX_ERR,
+			"invalid payload from external obj %d [%d]\n",
+			sync_obj, status);
+		return;
+	}
+
+	signal_cb->status = status;
+	signal_cb->ext_sync_id = sync_obj;
+	signal_cb->flag = SYNX_SIGNAL_FROM_CALLBACK;
+
+	dprintk(SYNX_DBG,
+		"external callback from %d on handle %u\n",
+		sync_obj, signal_cb->handle);
+
+	/*
+	 * invoke the handler directly as external callback
+	 * is invoked from separate task.
+	 * avoids creation of separate task again.
+	 */
+	synx_signal_handler(&signal_cb->cb_dispatch);
+}
+EXPORT_SYMBOL(synx_external_callback);
+
+bool synx_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+const char *synx_fence_driver_name(struct dma_fence *fence)
+{
+	return "Global Synx driver";
+}
+
+void synx_fence_release(struct dma_fence *fence)
+{
+	/* release the memory allocated during create */
+	kfree(fence->lock);
+	kfree(fence);
+	dprintk(SYNX_MEM, "released backing fence %pK\n", fence);
+}
+EXPORT_SYMBOL(synx_fence_release);
+
+static struct dma_fence_ops synx_fence_ops = {
+	.wait = dma_fence_default_wait,
+	.enable_signaling = synx_fence_enable_signaling,
+	.get_driver_name = synx_fence_driver_name,
+	.get_timeline_name = synx_fence_driver_name,
+	.release = synx_fence_release,
+};
+
+static int synx_create_sync_fd(struct dma_fence *fence)
+{
+	int fd;
+	struct sync_file *sync_file;
+
+	if (IS_ERR_OR_NULL(fence))
+		return -SYNX_INVALID;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0)
+		return fd;
+
+	sync_file = sync_file_create(fence);
+	if (IS_ERR_OR_NULL(sync_file)) {
+		dprintk(SYNX_ERR, "error creating sync file\n");
+		goto err;
+	}
+
+	fd_install(fd, sync_file->file);
+	return fd;
+
+err:
+	put_unused_fd(fd);
+	return -SYNX_INVALID;
+}
+
+void *synx_get_fence(struct synx_session *session,
+	u32 h_synx)
+{
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct dma_fence *fence = NULL;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return NULL;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) ||
+		 IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	fence = synx_obj->fence;
+	/* obtain an additional reference to the fence */
+	dma_fence_get(fence);
+	mutex_unlock(&synx_obj->obj_lock);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return fence;
+}
+EXPORT_SYMBOL(synx_get_fence);
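
A minimal kernel-side sketch (illustrative only) of the intended synx_get_fence()
usage, mirroring synx_handle_get_fence() later in this file; the session and
handle are assumed to come from synx_initialize() and synx_create().

	static void example_get_fence(struct synx_session *session, u32 h_synx)
	{
		struct dma_fence *fence;

		fence = synx_get_fence(session, h_synx);
		if (IS_ERR_OR_NULL(fence))
			return;

		/* ... use the fence, e.g. wrap it in a sync file ... */

		/* drop the additional reference taken by synx_get_fence() */
		dma_fence_put(fence);
	}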
+
+static int synx_native_check_bind(struct synx_client *client,
+	struct synx_create_params *params)
+{
+	int rc;
+	u32 h_synx;
+	struct synx_entry_64 *ext_entry;
+	struct synx_map_entry *entry;
+
+	if (IS_ERR_OR_NULL(params->fence))
+		return -SYNX_INVALID;
+
+	ext_entry = synx_util_retrieve_data(params->fence,
+					synx_util_map_params_to_type(params->flags));
+	if (IS_ERR_OR_NULL(ext_entry))
+		return -SYNX_NOENT;
+
+	h_synx = ext_entry->data[0];
+	synx_util_remove_data(params->fence,
+		synx_util_map_params_to_type(params->flags));
+
+	entry = synx_util_get_map_entry(h_synx);
+	if (IS_ERR_OR_NULL(entry))
+		/* entry may have been cleaned up; retry by allocating a new handle */
+		return -SYNX_NOENT;
+
+	rc = synx_util_init_handle(client, entry->synx_obj,
+			&h_synx, entry);
+	if (rc != SYNX_SUCCESS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] new handle init failed\n",
+			client->id);
+		goto fail;
+	}
+
+	*params->h_synx = h_synx;
+	return SYNX_SUCCESS;
+
+fail:
+	synx_util_release_map_entry(entry);
+	return rc;
+}
+
+static int synx_native_create_core(struct synx_client *client,
+	struct synx_create_params *params)
+{
+	int rc;
+	struct synx_coredata *synx_obj;
+	struct synx_map_entry *map_entry;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
+		IS_ERR_OR_NULL(params->h_synx))
+		return -SYNX_INVALID;
+
+	synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_NOMEM;
+
+	rc = synx_util_init_coredata(synx_obj, params,
+			&synx_fence_ops, client->dma_context);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] handle allocation failed\n",
+			client->id);
+		kfree(synx_obj);
+		goto fail;
+	}
+
+	map_entry = synx_util_insert_to_map(synx_obj,
+					*params->h_synx, 0);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		rc = PTR_ERR(map_entry);
+		synx_util_put_object(synx_obj);
+		goto fail;
+	}
+
+	rc = synx_util_add_callback(synx_obj, *params->h_synx);
+	if (rc != SYNX_SUCCESS) {
+		synx_util_release_map_entry(map_entry);
+		goto fail;
+	}
+
+	rc = synx_util_init_handle(client, synx_obj,
+			params->h_synx, map_entry);
+	if (rc < 0) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] unable to init new handle\n",
+			client->id);
+		synx_util_release_map_entry(map_entry);
+		goto fail;
+	}
+
+	dprintk(SYNX_MEM,
+		"[sess :%llu] allocated %u, core %pK, fence %pK\n",
+		client->id, *params->h_synx, synx_obj, synx_obj->fence);
+	return SYNX_SUCCESS;
+
+fail:
+	return rc;
+}
+
+int synx_create(struct synx_session *session,
+	struct synx_create_params *params)
+{
+	int rc = -SYNX_NOENT;
+	struct synx_client *client;
+	struct synx_external_desc_v2 ext_desc = {0};
+
+	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->h_synx) ||
+		params->flags > SYNX_CREATE_MAX_FLAGS) {
+		dprintk(SYNX_ERR, "invalid create arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	if (params->flags & SYNX_CREATE_DMA_FENCE) {
+		dprintk(SYNX_ERR,
+			"handle create with native fence not supported\n");
+		return -SYNX_NOSUPPORT;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	*params->h_synx = 0;
+
+	do {
+		/* create with external fence */
+		if (!IS_ERR_OR_NULL(params->fence))
+			rc = synx_native_check_bind(client, params);
+
+		if (rc == -SYNX_NOENT) {
+			rc = synx_native_create_core(client, params);
+			if (rc == SYNX_SUCCESS &&
+				 !IS_ERR_OR_NULL(params->fence)) {
+				/* save external fence details */
+				rc = synx_util_save_data(params->fence,
+					synx_util_map_params_to_type(params->flags),
+					*params->h_synx);
+				if (rc == -SYNX_ALREADY) {
+					/*
+					 * raced with create on same fence from
+					 * another client. clear the allocated
+					 * handle and retry.
+					 */
+					synx_native_release_core(client, *params->h_synx);
+					*params->h_synx = 0;
+					rc = -SYNX_NOENT;
+					continue;
+				} else if (rc != SYNX_SUCCESS) {
+					dprintk(SYNX_ERR,
+						"allocating handle failed=%d", rc);
+					synx_native_release_core(client, *params->h_synx);
+					break;
+				}
+
+				/* bind with external fence */
+				ext_desc.id = *((u32 *)params->fence);
+				ext_desc.type = synx_util_map_params_to_type(params->flags);
+				rc = synx_bind(session, *params->h_synx, ext_desc);
+				if (rc != SYNX_SUCCESS) {
+					dprintk(SYNX_ERR,
+						"[sess :%llu] bind external fence failed\n",
+						client->id);
+					synx_native_release_core(client, *params->h_synx);
+					goto fail;
+				}
+			}
+		}
+
+		if (rc == SYNX_SUCCESS)
+			dprintk(SYNX_VERB,
+				"[sess :%llu] handle allocated %u\n",
+				client->id, *params->h_synx);
+
+		break;
+	} while (true);
+
+fail:
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_create);
+
+int synx_native_signal_core(struct synx_coredata *synx_obj,
+	u32 status,
+	bool cb_signal,
+	u64 ext_sync_id)
+{
+	int rc = 0;
+	int ret;
+	u32 i = 0;
+	u32 idx = 0;
+	s32 sync_id;
+	u32 type;
+	void *data = NULL;
+	struct synx_bind_desc bind_descs[SYNX_MAX_NUM_BINDINGS];
+	struct bind_operations *bind_ops = NULL;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	synx_util_callback_dispatch(synx_obj, status);
+
+	/*
+	 * signal the externally bound sync obj(s) even if the fence signal
+	 * fails, using the error signal state (set above), to prevent deadlock
+	 */
+	if (synx_obj->num_bound_synxs > 0) {
+		memset(bind_descs, 0,
+			sizeof(struct synx_bind_desc) * SYNX_MAX_NUM_BINDINGS);
+		for (i = 0; i < synx_obj->num_bound_synxs; i++) {
+			/* signal invoked by external sync obj */
+			if (cb_signal &&
+				 (ext_sync_id ==
+				 synx_obj->bound_synxs[i].external_desc.id)) {
+				dprintk(SYNX_VERB,
+					"skipping signaling inbound sync: %llu\n",
+					ext_sync_id);
+				type = synx_obj->bound_synxs[i].external_desc.type;
+				memset(&synx_obj->bound_synxs[i], 0,
+					sizeof(struct synx_bind_desc));
+				/* clear the hash table entry */
+				synx_util_remove_data(&ext_sync_id, type);
+				continue;
+			}
+			memcpy(&bind_descs[idx++],
+				&synx_obj->bound_synxs[i],
+				sizeof(struct synx_bind_desc));
+			/* clear the memory; it has been backed up above */
+			memset(&synx_obj->bound_synxs[i], 0,
+				sizeof(struct synx_bind_desc));
+		}
+		synx_obj->num_bound_synxs = 0;
+	}
+
+	for (i = 0; i < idx; i++) {
+		sync_id = bind_descs[i].external_desc.id;
+		data = bind_descs[i].external_data;
+		type = bind_descs[i].external_desc.type;
+		bind_ops = synx_util_get_bind_ops(type);
+		if (IS_ERR_OR_NULL(bind_ops)) {
+			dprintk(SYNX_ERR,
+				"invalid bind ops for type: %u\n", type);
+			kfree(data);
+			continue;
+		}
+
+		/* clear the hash table entry */
+		synx_util_remove_data(&sync_id, type);
+
+		/*
+		 * we are already signaled, so we do not want to
+		 * be signaled again recursively
+		 */
+		ret = bind_ops->deregister_callback(
+				synx_external_callback, data, sync_id);
+		if (ret < 0) {
+			dprintk(SYNX_ERR,
+				"deregistration fail on %d, type: %u, err=%d\n",
+				sync_id, type, ret);
+			continue;
+		}
+		dprintk(SYNX_VERB,
+			"signal external sync: %d, type: %u, status: %u\n",
+			sync_id, type, status);
+		/* optional function to enable external signaling */
+		if (bind_ops->enable_signaling) {
+			ret = bind_ops->enable_signaling(sync_id);
+			if (ret < 0)
+				dprintk(SYNX_ERR,
+					"enabling fail on %d, type: %u, err=%d\n",
+					sync_id, type, ret);
+		}
+		ret = bind_ops->signal(sync_id, status);
+		if (ret < 0)
+			dprintk(SYNX_ERR,
+				"signaling fail on %d, type: %u, err=%d\n",
+				sync_id, type, ret);
+		/*
+		 * release the memory allocated for external data.
+		 * It is safe to release this memory as the external cb
+		 * has already been deregistered above.
+		 */
+		kfree(data);
+	}
+
+	return rc;
+}
+
+int synx_native_signal_fence(struct synx_coredata *synx_obj,
+	u32 status)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence))
+		return -SYNX_INVALID;
+
+	if (status <= SYNX_STATE_ACTIVE) {
+		dprintk(SYNX_ERR, "signaling with wrong status: %u\n",
+			status);
+		return -SYNX_INVALID;
+	}
+
+	if (synx_util_is_merged_object(synx_obj)) {
+		dprintk(SYNX_ERR, "signaling a composite handle\n");
+		return -SYNX_INVALID;
+	}
+
+	if (synx_util_get_object_status(synx_obj) !=
+		SYNX_STATE_ACTIVE)
+		return -SYNX_ALREADY;
+
+	if (IS_ERR_OR_NULL(synx_obj->signal_cb)) {
+		dprintk(SYNX_ERR, "signal cb in bad state\n");
+		return -SYNX_INVALID;
+	}
+
+	/*
+	 * remove the registered callback from the fence
+	 * so it does not invoke the signal path through the callback again
+	 */
+	if (!dma_fence_remove_callback(synx_obj->fence,
+		&synx_obj->signal_cb->fence_cb)) {
+		dprintk(SYNX_ERR, "callback could not be removed\n");
+		return -SYNX_INVALID;
+	}
+
+	dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
+		synx_obj->signal_cb);
+	kfree(synx_obj->signal_cb);
+	synx_obj->signal_cb = NULL;
+
+	/* releasing reference held by signal cb */
+	synx_util_put_object(synx_obj);
+
+	spin_lock_irqsave(synx_obj->fence->lock, flags);
+	/* re-check the status with the lock held to avoid racing with another signal */
+	if (synx_util_get_object_status_locked(synx_obj) !=
+		SYNX_STATE_ACTIVE) {
+		spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+		return -SYNX_ALREADY;
+	}
+
+	/* set fence error to model {signal w/ error} */
+	if (status != SYNX_STATE_SIGNALED_SUCCESS)
+		dma_fence_set_error(synx_obj->fence, -status);
+
+	rc = dma_fence_signal_locked(synx_obj->fence);
+	if (rc)
+		dprintk(SYNX_ERR,
+			"signaling fence %pK failed=%d\n",
+			synx_obj->fence, rc);
+	spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+
+	return rc;
+}
+
+void synx_signal_handler(struct work_struct *cb_dispatch)
+{
+	int rc = SYNX_SUCCESS;
+	u32 idx;
+	struct synx_signal_cb *signal_cb =
+		container_of(cb_dispatch, struct synx_signal_cb, cb_dispatch);
+	struct synx_coredata *synx_obj = signal_cb->synx_obj;
+
+	u32 h_synx = signal_cb->handle;
+	u32 status = signal_cb->status;
+
+	if ((signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) &&
+			(synx_util_is_global_handle(h_synx) ||
+			synx_util_is_global_object(synx_obj))) {
+		idx = (IS_ERR_OR_NULL(synx_obj)) ?
+				synx_util_global_idx(h_synx) :
+				synx_obj->global_idx;
+		rc = synx_global_update_status(idx, status);
+		if (rc != SYNX_SUCCESS)
+			dprintk(SYNX_ERR,
+				"global status update of %u failed=%d\n",
+				h_synx, rc);
+		synx_global_put_ref(idx);
+	}
+
+	/*
+	 * when invoked from an external callback, it is possible that
+	 * all local clients have already released the handle coredata.
+	 */
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_WARN,
+			"handle %d has no local clients\n",
+			h_synx);
+		dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
+			signal_cb);
+		kfree(signal_cb);
+		return;
+	}
+
+	if (rc != SYNX_SUCCESS) {
+		dprintk(SYNX_ERR,
+			"global status update for %u failed=%d\n",
+			h_synx, rc);
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+
+	if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC)
+		rc = synx_native_signal_fence(synx_obj, status);
+
+	if (rc == SYNX_SUCCESS)
+		rc = synx_native_signal_core(synx_obj, status,
+				(signal_cb->flag & SYNX_SIGNAL_FROM_CALLBACK) ?
+				true : false, signal_cb->ext_sync_id);
+
+	mutex_unlock(&synx_obj->obj_lock);
+
+	if (rc != SYNX_SUCCESS)
+		dprintk(SYNX_ERR,
+			"internal signaling %u failed=%d",
+			h_synx, rc);
+
+fail:
+	/* release reference held by signal cb */
+	synx_util_put_object(synx_obj);
+	dprintk(SYNX_MEM, "signal cb destroyed %pK\n", signal_cb);
+	kfree(signal_cb);
+	dprintk(SYNX_VERB, "signal handle %u dispatch complete=%d",
+		h_synx, rc);
+}
+
+/* this function may be called from atomic context */
+void synx_fence_callback(struct dma_fence *fence,
+	struct dma_fence_cb *cb)
+{
+	struct synx_signal_cb *signal_cb =
+		container_of(cb, struct synx_signal_cb, fence_cb);
+
+	dprintk(SYNX_DBG,
+		"callback from external fence %pK for handle %u\n",
+		fence, signal_cb->handle);
+
+	/* other signal_cb members are set during cb registration */
+	signal_cb->status = dma_fence_get_status_locked(fence);
+
+	INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler);
+	queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch);
+}
+EXPORT_SYMBOL(synx_fence_callback);
+
+static int synx_signal_offload_job(
+	struct synx_client *client,
+	struct synx_coredata *synx_obj,
+	u32 h_synx, u32 status)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_signal_cb *signal_cb;
+
+	signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(signal_cb)) {
+		rc = -SYNX_NOMEM;
+		goto fail;
+	}
+
+	/*
+	 * since the signal will be queued to a separate thread,
+	 * take an additional reference to ensure the synx coredata
+	 * pointer remains valid, avoiding any potential
+	 * use-after-free.
+	 */
+	synx_util_get_object(synx_obj);
+
+	signal_cb->handle = h_synx;
+	signal_cb->status = status;
+	signal_cb->synx_obj = synx_obj;
+	signal_cb->flag = SYNX_SIGNAL_FROM_CLIENT;
+
+	dprintk(SYNX_VERB,
+		"[sess :%llu] signal work queued for %u\n",
+		client->id, h_synx);
+
+	INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler);
+	queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch);
+
+fail:
+	return rc;
+}
+
+int synx_signal(struct synx_session *session, u32 h_synx, u32 status)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data = NULL;
+	struct synx_coredata *synx_obj;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	if (status <= SYNX_STATE_ACTIVE) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] signaling with wrong status: %u\n",
+			client->id, status);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) ||
+			IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	if (synx_util_is_global_handle(h_synx) ||
+			synx_util_is_global_object(synx_obj))
+		rc = synx_global_update_status(
+				synx_obj->global_idx, status);
+
+	if (rc != SYNX_SUCCESS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] status update %d failed=%d\n",
+			client->id, h_synx, rc);
+		goto fail;
+	}
+
+	/*
+	 * offload callback dispatch and external fence
+	 * notification, if any, to a separate worker thread.
+	 */
+	if (synx_obj->num_bound_synxs ||
+			!list_empty(&synx_obj->reg_cbs_list))
+		rc = synx_signal_offload_job(client, synx_obj,
+				h_synx, status);
+
+	mutex_lock(&synx_obj->obj_lock);
+	rc = synx_native_signal_fence(synx_obj, status);
+	if (rc != SYNX_SUCCESS)
+		dprintk(SYNX_ERR,
+			"[sess :%llu] signaling %u failed=%d\n",
+			client->id, h_synx, rc);
+	mutex_unlock(&synx_obj->obj_lock);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_signal);
+
+static int synx_match_payload(struct synx_kernel_payload *cb_payload,
+	struct synx_kernel_payload *payload)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(cb_payload) || IS_ERR_OR_NULL(payload))
+		return -SYNX_INVALID;
+
+	if ((cb_payload->cb_func == payload->cb_func) &&
+			(cb_payload->data == payload->data)) {
+		if (payload->cancel_cb_func) {
+			cb_payload->cb_func =
+				payload->cancel_cb_func;
+			rc = 1;
+		} else {
+			rc = 2;
+			dprintk(SYNX_VERB,
+				"kernel cb de-registration success\n");
+		}
+	}
+
+	return rc;
+}
+
+int synx_async_wait(struct synx_session *session,
+	struct synx_callback_params *params)
+{
+	int rc = 0;
+	u32 idx;
+	u32 status;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct synx_cb_data *synx_cb;
+	struct synx_kernel_payload payload;
+
+	if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
+		return -SYNX_INVALID;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, params->h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, params->h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_merged_object(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] cannot async wait on merged handle %u\n",
+			client->id, params->h_synx);
+		rc = -SYNX_INVALID;
+		goto release;
+	}
+
+	synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(synx_cb)) {
+		rc = -SYNX_NOMEM;
+		goto release;
+	}
+
+	payload.h_synx = params->h_synx;
+	payload.cb_func = params->cb_func;
+	payload.data = params->userdata;
+
+	/* allocate a free index from client cb table */
+	rc = synx_util_alloc_cb_entry(client, &payload, &idx);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] error allocating cb entry\n",
+			client->id);
+		kfree(synx_cb);
+		goto release;
+	}
+
+	if (synx_util_is_global_handle(params->h_synx) ||
+			synx_util_is_global_object(synx_obj))
+		status = synx_global_test_status_set_wait(
+					synx_util_global_idx(params->h_synx),
+					SYNX_CORE_APSS);
+	else
+		status = synx_util_get_object_status(synx_obj);
+
+	synx_cb->session = session;
+	synx_cb->idx = idx;
+	INIT_WORK(&synx_cb->cb_dispatch, synx_util_cb_dispatch);
+
+	/* add callback if object still ACTIVE, dispatch if SIGNALED */
+	if (status == SYNX_STATE_ACTIVE) {
+		dprintk(SYNX_VERB,
+			"[sess :%llu] callback added for handle %u\n",
+			client->id, params->h_synx);
+		list_add(&synx_cb->node, &synx_obj->reg_cbs_list);
+	} else {
+		synx_cb->status = status;
+		dprintk(SYNX_VERB,
+			"[sess :%llu] callback queued for handle %u\n",
+			client->id, params->h_synx);
+		queue_work(synx_dev->wq_cb,
+			&synx_cb->cb_dispatch);
+	}
+
+release:
+	mutex_unlock(&synx_obj->obj_lock);
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_async_wait);
+
+int synx_cancel_async_wait(
+	struct synx_session *session,
+	struct synx_callback_params *params)
+{
+	int rc = 0, ret = 0;
+	u32 status;
+	bool match_found = false;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct synx_kernel_payload payload;
+	struct synx_cb_data *synx_cb, *synx_cb_temp;
+	struct synx_client_cb *cb_payload;
+
+	if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
+		return -SYNX_INVALID;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, params->h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, params->h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_merged_object(synx_obj) ||
+		synx_util_is_external_object(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"cannot cancel wait on composite handle\n");
+		rc = -SYNX_INVALID;
+		goto release;
+	}
+
+	payload.h_synx = params->h_synx;
+	payload.cb_func = params->cb_func;
+	payload.data = params->userdata;
+	payload.cancel_cb_func = params->cancel_cb_func;
+
+	status = synx_util_get_object_status(synx_obj);
+	if (status != SYNX_STATE_ACTIVE) {
+		dprintk(SYNX_ERR,
+			"handle %u already signaled cannot cancel\n",
+			params->h_synx);
+		rc = -SYNX_INVALID;
+		goto release;
+	}
+
+	status = SYNX_CALLBACK_RESULT_CANCELED;
+	/* remove all cb payloads matching the deregister call */
+	list_for_each_entry_safe(synx_cb, synx_cb_temp,
+			&synx_obj->reg_cbs_list, node) {
+		if (synx_cb->session != session) {
+			continue;
+		} else if (synx_cb->idx == 0 ||
+			synx_cb->idx >= SYNX_MAX_OBJS) {
+			/*
+			 * this should not happen. Even if it does,
+			 * the allocated memory will be cleaned up
+			 * when the object is destroyed, preventing
+			 * any memory leaks.
+			 */
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid callback data\n",
+				client->id);
+			continue;
+		}
+
+		cb_payload = &client->cb_table[synx_cb->idx];
+		ret = synx_match_payload(&cb_payload->kernel_cb, &payload);
+		switch (ret) {
+		case 1:
+			/* queue the cancel cb work */
+			list_del_init(&synx_cb->node);
+			synx_cb->status = status;
+			queue_work(synx_dev->wq_cb,
+				&synx_cb->cb_dispatch);
+			match_found = true;
+			break;
+		case 2:
+			/* no cancellation cb */
+			if (synx_util_clear_cb_entry(client, cb_payload))
+				dprintk(SYNX_ERR,
+				"[sess :%llu] error clearing cb %u\n",
+				client->id, params->h_synx);
+			list_del_init(&synx_cb->node);
+			kfree(synx_cb);
+			match_found = true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!match_found)
+		rc = -SYNX_INVALID;
+
+release:
+	mutex_unlock(&synx_obj->obj_lock);
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_cancel_async_wait);
+
+int synx_merge(struct synx_session *session,
+	struct synx_merge_params *params)
+{
+	int rc, i, j = 0;
+	u32 h_child;
+	u32 count = 0;
+	u32 *h_child_list;
+	struct synx_client *client;
+	struct dma_fence **fences = NULL;
+	struct synx_coredata *synx_obj;
+	struct synx_map_entry *map_entry;
+
+	if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
+		return -SYNX_INVALID;
+
+	if (IS_ERR_OR_NULL(params->h_synxs) ||
+		IS_ERR_OR_NULL(params->h_merged_obj)) {
+		dprintk(SYNX_ERR, "invalid arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	rc = synx_util_validate_merge(client, params->h_synxs,
+			params->num_objs, &fences, &count);
+	if (rc < 0) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] merge validation failed\n",
+			client->id);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	synx_obj = kzalloc(sizeof(*synx_obj), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		rc = -SYNX_NOMEM;
+		goto fail;
+	}
+
+	rc = synx_util_init_group_coredata(synx_obj, fences,
+			params, count, client->dma_context);
+	if (rc) {
+		dprintk(SYNX_ERR,
+		"[sess :%llu] error initializing merge handle\n",
+			client->id);
+		goto clean_up;
+	}
+
+	map_entry = synx_util_insert_to_map(synx_obj,
+					*params->h_merged_obj, 0);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		rc = PTR_ERR(map_entry);
+		goto clean_up;
+	}
+
+	rc = synx_util_init_handle(client, synx_obj,
+			params->h_merged_obj, map_entry);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] unable to init merge handle %u\n",
+			client->id, *params->h_merged_obj);
+		dma_fence_put(synx_obj->fence);
+		goto clear;
+	}
+
+	if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
+		h_child_list = kcalloc(count, sizeof(*h_child_list), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(h_child_list)) {
+			rc = -SYNX_NOMEM;
+			goto clear;
+		}
+
+		for (i = 0; i < count; i++) {
+			h_child = synx_util_get_fence_entry((u64)fences[i], 1);
+			if (!synx_util_is_global_handle(h_child))
+				continue;
+
+			h_child_list[j++] = synx_util_global_idx(h_child);
+		}
+
+		rc = synx_global_merge(h_child_list, j,
+			synx_util_global_idx(*params->h_merged_obj));
+		if (rc != SYNX_SUCCESS) {
+			dprintk(SYNX_ERR, "global merge failed\n");
+			goto clear;
+		}
+	}
+
+	dprintk(SYNX_MEM,
+		"[sess :%llu] merge allocated %u, core %pK, fence %pK\n",
+		client->id, *params->h_merged_obj, synx_obj,
+		synx_obj->fence);
+	synx_put_client(client);
+	return SYNX_SUCCESS;
+
+clear:
+	synx_util_release_map_entry(map_entry);
+clean_up:
+	kfree(synx_obj);
+fail:
+	synx_util_merge_error(client, params->h_synxs, count);
+	if (params->num_objs && params->num_objs <= count)
+		kfree(fences);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_merge);
+
+int synx_native_release_core(struct synx_client *client,
+	u32 h_synx)
+{
+	int rc = -SYNX_INVALID;
+	struct synx_handle_coredata *curr, *synx_handle = NULL;
+
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+			curr, node, h_synx) {
+		if (curr->key == h_synx &&
+			curr->rel_count != 0) {
+			curr->rel_count--;
+			synx_handle = curr;
+			rc = SYNX_SUCCESS;
+			break;
+		}
+	}
+	spin_unlock_bh(&client->handle_map_lock);
+
+	/* release the reference obtained at synx creation */
+	synx_util_release_handle(synx_handle);
+
+	return rc;
+}
+
+int synx_release(struct synx_session *session, u32 h_synx)
+{
+	int rc = 0;
+	struct synx_client *client;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	rc = synx_native_release_core(client, h_synx);
+
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_release);
+
+int synx_wait(struct synx_session *session,
+	u32 h_synx, u64 timeout_ms)
+{
+	int rc = 0;
+	unsigned long timeleft;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	if (synx_util_is_global_handle(h_synx)) {
+		rc = synx_global_test_status_set_wait(
+			synx_util_global_idx(h_synx), SYNX_CORE_APSS);
+		if (rc != SYNX_STATE_ACTIVE)
+			goto fail;
+	}
+
+	timeleft = dma_fence_wait_timeout(synx_obj->fence, false,
+					msecs_to_jiffies(timeout_ms));
+	if (timeleft <= 0) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] wait timeout for handle %u\n",
+			client->id, h_synx);
+		rc = -ETIMEDOUT;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	rc = synx_util_get_object_status(synx_obj);
+	mutex_unlock(&synx_obj->obj_lock);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_wait);
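
A minimal sketch (illustrative only) of the create/signal/wait/release flow
exported above; the object name, flags and timeout are arbitrary, and the
session is assumed to come from synx_initialize() (defined later in this file).

	static int example_signal_and_wait(struct synx_session *session)
	{
		struct synx_create_params params = {0};
		char name[SYNX_OBJ_NAME_LEN] = "example-obj";
		u32 h_synx = 0;
		int rc;

		params.h_synx = &h_synx;
		params.name = name;
		params.flags = 0;	/* plain local handle for this sketch */

		rc = synx_create(session, &params);
		if (rc != SYNX_SUCCESS)
			return rc;

		/* producer: mark the handle as successfully signaled */
		synx_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);

		/* consumer: returns the final status, or -ETIMEDOUT on timeout */
		rc = synx_wait(session, h_synx, 1000);

		synx_release(session, h_synx);
		return rc;
	}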
+
+int synx_bind(struct synx_session *session,
+	u32 h_synx,
+	struct synx_external_desc_v2 external_sync)
+{
+	int rc = 0;
+	u32 i;
+	u32 bound_idx;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data = NULL;
+	struct synx_coredata *synx_obj;
+	struct synx_signal_cb *data = NULL;
+	struct bind_operations *bind_ops = NULL;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		if (rc || synx_data)
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid handle access %u\n",
+				client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	bind_ops = synx_util_get_bind_ops(external_sync.type);
+	if (IS_ERR_OR_NULL(bind_ops)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid bind ops for %u\n",
+			client->id, external_sync.type);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_merged_object(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] cannot bind to composite handle %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto release;
+	}
+
+	if (synx_obj->num_bound_synxs >= SYNX_MAX_NUM_BINDINGS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] max bindings reached for handle %u\n",
+			client->id, h_synx);
+		rc = -SYNX_NOMEM;
+		goto release;
+	}
+
+	/* don't bind external sync obj if already done */
+	for (i = 0; i < synx_obj->num_bound_synxs; i++) {
+		if ((external_sync.id ==
+				synx_obj->bound_synxs[i].external_desc.id) &&
+				(external_sync.type ==
+				synx_obj->bound_synxs[i].external_desc.type)){
+			dprintk(SYNX_ERR,
+				"[sess :%llu] duplicate bind for sync %llu\n",
+				client->id, external_sync.id);
+			rc = -SYNX_ALREADY;
+			goto release;
+		}
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(data)) {
+		rc = -SYNX_NOMEM;
+		goto release;
+	}
+
+	/* get an additional reference since a pointer is passed to the cb */
+	synx_util_get_object(synx_obj);
+
+	/* data passed to external callback */
+	data->handle = h_synx;
+	data->synx_obj = synx_obj;
+
+	bound_idx = synx_obj->num_bound_synxs;
+	memcpy(&synx_obj->bound_synxs[bound_idx],
+		&external_sync, sizeof(struct synx_external_desc_v2));
+	synx_obj->bound_synxs[bound_idx].external_data = data;
+	synx_obj->num_bound_synxs++;
+	mutex_unlock(&synx_obj->obj_lock);
+
+	rc = bind_ops->register_callback(synx_external_callback,
+			data, external_sync.id);
+	if (rc) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] callback reg failed for %llu\n",
+			client->id, external_sync.id);
+		mutex_lock(&synx_obj->obj_lock);
+		memset(&synx_obj->bound_synxs[bound_idx], 0,
+			sizeof(struct synx_external_desc_v2));
+		synx_obj->num_bound_synxs--;
+		mutex_unlock(&synx_obj->obj_lock);
+		synx_util_put_object(synx_obj);
+		kfree(data);
+		goto fail;
+	}
+
+	synx_util_release_handle(synx_data);
+	dprintk(SYNX_DBG,
+		"[sess :%llu] ext sync %llu bound to handle %u\n",
+		client->id, external_sync.id, h_synx);
+	synx_put_client(client);
+	return SYNX_SUCCESS;
+
+release:
+	mutex_unlock(&synx_obj->obj_lock);
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_bind);
+
+int synx_get_status(struct synx_session *session,
+	u32 h_synx)
+{
+	int rc = 0;
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	synx_data = synx_util_acquire_handle(client, h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj) ||
+		IS_ERR_OR_NULL(synx_obj->fence)) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle access %u\n",
+			client->id, h_synx);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	if (synx_util_is_global_handle(h_synx)) {
+		rc = synx_global_get_status(
+				synx_util_global_idx(h_synx));
+		if (rc != SYNX_STATE_ACTIVE) {
+			dprintk(SYNX_VERB,
+				"[sess :%llu] handle %u in status %d\n",
+				client->id, h_synx, rc);
+			goto fail;
+		}
+	}
+
+	mutex_lock(&synx_obj->obj_lock);
+	rc = synx_util_get_object_status(synx_obj);
+	mutex_unlock(&synx_obj->obj_lock);
+	dprintk(SYNX_VERB,
+		"[sess :%llu] handle %u status %d\n",
+		client->id, h_synx, rc);
+
+fail:
+	synx_util_release_handle(synx_data);
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_get_status);
+
+static struct synx_map_entry *synx_handle_conversion(
+	struct synx_client *client,
+	u32 *h_synx, struct synx_map_entry *old_entry)
+{
+	int rc;
+	struct synx_map_entry *map_entry = NULL;
+	struct synx_coredata *synx_obj;
+
+	if (IS_ERR_OR_NULL(old_entry)) {
+		old_entry = synx_util_get_map_entry(*h_synx);
+		if (IS_ERR_OR_NULL(old_entry)) {
+			rc = PTR_ERR(old_entry);
+			dprintk(SYNX_ERR,
+				"invalid import handle %u err=%d",
+				*h_synx, rc);
+			return old_entry;
+		}
+	}
+
+	synx_obj = old_entry->synx_obj;
+	BUG_ON(synx_obj == NULL);
+
+	mutex_lock(&synx_obj->obj_lock);
+	synx_util_get_object(synx_obj);
+	if (synx_obj->global_idx != 0) {
+		*h_synx = synx_encode_handle(
+				synx_obj->global_idx, SYNX_CORE_APSS, true);
+
+		map_entry = synx_util_get_map_entry(*h_synx);
+		if (IS_ERR_OR_NULL(map_entry)) {
+			/* raced with release from last global client */
+			map_entry = synx_util_insert_to_map(synx_obj,
+						*h_synx, 0);
+			if (IS_ERR_OR_NULL(map_entry)) {
+				rc = PTR_ERR(map_entry);
+				dprintk(SYNX_ERR,
+					"addition of %u to map failed=%d",
+					*h_synx, rc);
+			}
+		}
+	} else {
+		rc = synx_alloc_global_handle(h_synx);
+		if (rc == SYNX_SUCCESS) {
+			synx_obj->global_idx =
+				synx_util_global_idx(*h_synx);
+			synx_obj->type |= SYNX_CREATE_GLOBAL_FENCE;
+
+			map_entry = synx_util_insert_to_map(synx_obj,
+						*h_synx, 0);
+			if (IS_ERR_OR_NULL(map_entry)) {
+				rc = PTR_ERR(map_entry);
+				synx_global_put_ref(
+					synx_util_global_idx(*h_synx));
+				dprintk(SYNX_ERR,
+					"insertion of %u to map failed=%d",
+					*h_synx, rc);
+			}
+		}
+	}
+	mutex_unlock(&synx_obj->obj_lock);
+
+	if (IS_ERR_OR_NULL(map_entry))
+		synx_util_put_object(synx_obj);
+
+	synx_util_release_map_entry(old_entry);
+	return map_entry;
+}
+
+static int synx_native_import_handle(struct synx_client *client,
+	struct synx_import_indv_params *params)
+{
+	int rc = SYNX_SUCCESS;
+	u32 h_synx, core_id;
+	struct synx_map_entry *map_entry, *old_entry;
+	struct synx_coredata *synx_obj;
+	struct synx_handle_coredata *synx_data = NULL, *curr;
+	char name[SYNX_OBJ_NAME_LEN] = {0};
+	struct synx_create_params c_params = {0};
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
+		IS_ERR_OR_NULL(params->fence) ||
+		IS_ERR_OR_NULL(params->new_h_synx))
+		return -SYNX_INVALID;
+
+	h_synx = *((u32 *)params->fence);
+
+	/* check if already mapped to client */
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+			curr, node, h_synx) {
+		if (curr->key == h_synx &&
+				curr->rel_count != 0 &&
+				(synx_util_is_global_handle(h_synx) ||
+				params->flags & SYNX_IMPORT_LOCAL_FENCE)) {
+			curr->rel_count++;
+			kref_get(&curr->refcount);
+			synx_data = curr;
+			break;
+		}
+	}
+	spin_unlock_bh(&client->handle_map_lock);
+
+	if (synx_data) {
+		*params->new_h_synx = h_synx;
+		return SYNX_SUCCESS;
+	}
+
+	map_entry = synx_util_get_map_entry(h_synx);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		core_id = (h_synx & SYNX_OBJ_CORE_ID_MASK)
+					>> SYNX_HANDLE_INDEX_BITS;
+		if (core_id == SYNX_CORE_APSS) {
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid import handle %u\n",
+				client->id, h_synx);
+			return -SYNX_INVALID;
+		} else if (synx_util_is_global_handle(h_synx)) {
+			/* import global handle created in another core */
+			synx_util_map_import_params_to_create(params, &c_params);
+			scnprintf(name, SYNX_OBJ_NAME_LEN, "import-client-%d",
+				current->pid);
+			c_params.name = name;
+			c_params.h_synx = &h_synx;
+
+			rc = synx_native_create_core(client, &c_params);
+			if (rc != SYNX_SUCCESS)
+				return rc;
+
+			*params->new_h_synx = h_synx;
+			return SYNX_SUCCESS;
+		}
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid handle %u\n",
+			client->id, h_synx);
+		return -SYNX_INVALID;
+	}
+
+	synx_obj = map_entry->synx_obj;
+	BUG_ON(synx_obj == NULL);
+
+	if ((params->flags & SYNX_IMPORT_GLOBAL_FENCE) &&
+		!synx_util_is_global_handle(h_synx)) {
+		old_entry = map_entry;
+		map_entry = synx_handle_conversion(client, &h_synx,
+						old_entry);
+		/* conversion failure leaves no valid map entry to import */
+		if (IS_ERR_OR_NULL(map_entry))
+			return -SYNX_INVALID;
+	}
+
+	*params->new_h_synx = h_synx;
+
+	rc = synx_util_init_handle(client, map_entry->synx_obj,
+		params->new_h_synx, map_entry);
+	if (rc != SYNX_SUCCESS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] init of imported handle %u failed=%d\n",
+			client->id, h_synx, rc);
+		synx_util_release_map_entry(map_entry);
+	}
+
+	return rc;
+}
+
+static int synx_native_import_fence(struct synx_client *client,
+	struct synx_import_indv_params *params)
+{
+	int rc = SYNX_SUCCESS;
+	u32 curr_h_synx;
+	u32 global;
+	struct synx_create_params c_params = {0};
+	char name[SYNX_OBJ_NAME_LEN] = {0};
+	struct synx_fence_entry *entry;
+	struct synx_map_entry *map_entry = NULL;
+	struct synx_handle_coredata *synx_data = NULL, *curr;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
+			IS_ERR_OR_NULL(params->fence) ||
+			IS_ERR_OR_NULL(params->new_h_synx))
+		return -SYNX_INVALID;
+
+	global = SYNX_IMPORT_GLOBAL_FENCE & params->flags;
+
+retry:
+	*params->new_h_synx =
+		synx_util_get_fence_entry((u64)params->fence, global);
+	if (*params->new_h_synx == 0) {
+		/* create a new synx obj and add to fence map */
+		synx_util_map_import_params_to_create(params, &c_params);
+		scnprintf(name, SYNX_OBJ_NAME_LEN, "import-client-%d",
+			current->pid);
+		c_params.name = name;
+		c_params.h_synx = params->new_h_synx;
+		c_params.fence = params->fence;
+
+		rc = synx_native_create_core(client, &c_params);
+		if (rc != SYNX_SUCCESS)
+			return rc;
+
+		curr_h_synx = *params->new_h_synx;
+
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(entry)) {
+			rc = -SYNX_NOMEM;
+			curr_h_synx = *c_params.h_synx;
+			goto fail;
+		}
+
+		do {
+			entry->key = (u64)params->fence;
+			if (global)
+				entry->g_handle = *params->new_h_synx;
+			else
+				entry->l_handle = *params->new_h_synx;
+
+			rc = synx_util_insert_fence_entry(entry,
+					params->new_h_synx, global);
+			if (rc == SYNX_SUCCESS) {
+				dprintk(SYNX_DBG,
+					"mapped fence %pK to new handle %u\n",
+					params->fence, *params->new_h_synx);
+				break;
+			} else if (rc == -SYNX_ALREADY) {
+				/*
+				 * release the newly allocated handle
+				 * and use the handle already mapped
+				 * to this fence instead.
+				 */
+				map_entry = synx_util_get_map_entry(
+								*params->new_h_synx);
+				if (IS_ERR_OR_NULL(map_entry)) {
+					/* race with fence release, need to retry */
+					dprintk(SYNX_DBG,
+						"re-attempting handle import\n");
+					*params->new_h_synx = curr_h_synx;
+					continue;
+				}
+
+				rc = synx_util_init_handle(client,
+						map_entry->synx_obj,
+						params->new_h_synx, map_entry);
+
+				dprintk(SYNX_DBG, "mapped fence %pK to handle %u\n",
+					params->fence, *params->new_h_synx);
+				goto release;
+			} else {
+				dprintk(SYNX_ERR,
+					"importing fence %pK failed, err=%d\n",
+					params->fence, rc);
+				goto release;
+			}
+		} while (true);
+	} else {
+		/* check if already mapped to client */
+		spin_lock_bh(&client->handle_map_lock);
+		hash_for_each_possible(client->handle_map,
+				curr, node, *params->new_h_synx) {
+			if (curr->key == *params->new_h_synx &&
+					curr->rel_count != 0) {
+				curr->rel_count++;
+				kref_get(&curr->refcount);
+				synx_data = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&client->handle_map_lock);
+
+		if (synx_data) {
+			dprintk(SYNX_DBG, "mapped fence %pK to handle %u\n",
+				params->fence, *params->new_h_synx);
+			return SYNX_SUCCESS;
+		}
+
+		if (global && !synx_util_is_global_handle(
+				*params->new_h_synx))
+			map_entry = synx_handle_conversion(client,
+				params->new_h_synx, NULL);
+		else
+			map_entry = synx_util_get_map_entry(
+						*params->new_h_synx);
+
+		if (IS_ERR_OR_NULL(map_entry)) {
+			/* race with fence release, need to retry */
+			dprintk(SYNX_DBG, "re-attempting handle import\n");
+			goto retry;
+		}
+
+		rc = synx_util_init_handle(client, map_entry->synx_obj,
+			params->new_h_synx, map_entry);
+
+		dprintk(SYNX_DBG, "mapped fence %pK to existing handle %u\n",
+			params->fence, *params->new_h_synx);
+	}
+
+	return rc;
+
+release:
+	kfree(entry);
+fail:
+	synx_native_release_core(client, curr_h_synx);
+	return rc;
+}
+
+static int synx_native_import_indv(struct synx_client *client,
+	struct synx_import_indv_params *params)
+{
+	int rc = -SYNX_INVALID;
+
+	if (IS_ERR_OR_NULL(params) ||
+		IS_ERR_OR_NULL(params->new_h_synx) ||
+		IS_ERR_OR_NULL(params->fence)) {
+		dprintk(SYNX_ERR, "invalid import arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	if (likely(params->flags & SYNX_IMPORT_DMA_FENCE))
+		rc = synx_native_import_fence(client, params);
+	else if (params->flags & SYNX_IMPORT_SYNX_FENCE)
+		rc = synx_native_import_handle(client, params);
+
+	dprintk(SYNX_DBG,
+		"[sess :%llu] import of fence %pK %s, handle %u\n",
+		client->id, params->fence,
+		rc ? "failed" : "successful",
+		rc ? 0 : *params->new_h_synx);
+
+	return rc;
+}
+
+static int synx_native_import_arr(struct synx_client *client,
+	struct synx_import_arr_params *params)
+{
+	u32 i;
+	int rc = SYNX_SUCCESS;
+
+	if (IS_ERR_OR_NULL(params) || params->num_fences == 0) {
+		dprintk(SYNX_ERR, "invalid import arr arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	for (i = 0; i < params->num_fences; i++) {
+		rc = synx_native_import_indv(client, &params->list[i]);
+		if (rc != SYNX_SUCCESS) {
+			dprintk(SYNX_ERR,
+				"importing fence[%u] %pK failed=%d\n",
+				i, params->list[i].fence, rc);
+			break;
+		}
+	}
+
+	if (rc != SYNX_SUCCESS)
+		while (i--) {
+			/* release the imported handles and cleanup */
+			if (synx_native_release_core(client,
+				*params->list[i].new_h_synx) != SYNX_SUCCESS)
+				dprintk(SYNX_ERR,
+					"error cleaning up imported handle[%u] %u\n",
+					i, *params->list[i].new_h_synx);
+		}
+
+	return rc;
+}
+
+int synx_import(struct synx_session *session,
+	struct synx_import_params *params)
+{
+	int rc = 0;
+	struct synx_client *client;
+
+	if (IS_ERR_OR_NULL(params)) {
+		dprintk(SYNX_ERR, "invalid import arguments\n");
+		return -SYNX_INVALID;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	/* import fence based on its type */
+	if (params->type == SYNX_IMPORT_ARR_PARAMS)
+		rc = synx_native_import_arr(client, &params->arr);
+	else
+		rc = synx_native_import_indv(client, &params->indv);
+
+	synx_put_client(client);
+	return rc;
+}
+EXPORT_SYMBOL(synx_import);
+
+static int synx_handle_create(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int result;
+	int csl_fence;
+	struct synx_create_v2 create_info;
+	struct synx_create_params params = {0};
+
+	if (k_ioctl->size != sizeof(create_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&create_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	params.h_synx = &create_info.synx_obj;
+	params.name = create_info.name;
+	params.flags = create_info.flags;
+	if (create_info.flags & SYNX_CREATE_CSL_FENCE) {
+		csl_fence = create_info.desc.id[0];
+		params.fence = &csl_fence;
+	}
+	result = synx_create(session, &params);
+
+	if (!result)
+		if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+				&create_info,
+				k_ioctl->size))
+			return -EFAULT;
+
+	return result;
+}
+
+static int synx_handle_getstatus(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_signal_v2 signal_info;
+
+	if (k_ioctl->size != sizeof(signal_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&signal_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	signal_info.synx_state =
+		synx_get_status(session, signal_info.synx_obj);
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&signal_info,
+			k_ioctl->size))
+		return -EFAULT;
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_handle_import(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_import_info import_info;
+	struct synx_import_params params = {0};
+
+	if (k_ioctl->size != sizeof(import_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&import_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	if (import_info.flags & SYNX_IMPORT_SYNX_FENCE)
+		params.indv.fence = &import_info.synx_obj;
+	else if (import_info.flags & SYNX_IMPORT_DMA_FENCE)
+		params.indv.fence =
+			sync_file_get_fence(import_info.desc.id[0]);
+
+	params.type = SYNX_IMPORT_INDV_PARAMS;
+	params.indv.flags = import_info.flags;
+	params.indv.new_h_synx = &import_info.new_synx_obj;
+
+	if (synx_import(session, &params))
+		return -SYNX_INVALID;
+
+	if (import_info.flags & SYNX_IMPORT_DMA_FENCE)
+		dma_fence_put(params.indv.fence);
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&import_info,
+			k_ioctl->size))
+		return -EFAULT;
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_handle_import_arr(
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int rc = -SYNX_INVALID;
+	u32 idx = 0;
+	struct synx_client *client;
+	struct synx_import_arr_info arr_info;
+	struct synx_import_info *arr;
+	struct synx_import_indv_params params = {0};
+
+	if (k_ioctl->size != sizeof(arr_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&arr_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	arr = kcalloc(arr_info.num_objs,
+				sizeof(*arr), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(arr))
+		return -ENOMEM;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client)) {
+		rc = PTR_ERR(client);
+		goto clean;
+	}
+
+	if (copy_from_user(arr,
+			u64_to_user_ptr(arr_info.list),
+			sizeof(*arr) * arr_info.num_objs)) {
+		rc = -EFAULT;
+		goto fail;
+	}
+
+	while (idx < arr_info.num_objs) {
+		params.new_h_synx = &arr[idx].new_synx_obj;
+		params.flags = arr[idx].flags;
+		if (arr[idx].flags & SYNX_IMPORT_SYNX_FENCE)
+			params.fence = &arr[idx].synx_obj;
+		if (arr[idx].flags & SYNX_IMPORT_DMA_FENCE)
+			params.fence =
+				sync_file_get_fence(arr[idx].desc.id[0]);
+		rc = synx_native_import_indv(client, &params);
+		if (rc != SYNX_SUCCESS)
+			break;
+		idx++;
+	}
+
+	/* release allocated handles in case of failure */
+	if (rc != SYNX_SUCCESS) {
+		while (idx > 0)
+			synx_native_release_core(client,
+				arr[--idx].new_synx_obj);
+	} else {
+		if (copy_to_user(u64_to_user_ptr(arr_info.list),
+			arr,
+			sizeof(*arr) * arr_info.num_objs)) {
+			rc = -EFAULT;
+			goto fail;
+		}
+	}
+
+fail:
+	synx_put_client(client);
+clean:
+	kfree(arr);
+	return rc;
+}
+
+static int synx_handle_export(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	return -SYNX_INVALID;
+}
+
+static int synx_handle_signal(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_signal_v2 signal_info;
+
+	if (k_ioctl->size != sizeof(signal_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&signal_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	return synx_signal(session, signal_info.synx_obj,
+		signal_info.synx_state);
+}
+
+static int synx_handle_merge(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	u32 *h_synxs;
+	int result;
+	struct synx_merge_v2 merge_info;
+	struct synx_merge_params params = {0};
+
+	if (k_ioctl->size != sizeof(merge_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&merge_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	if (merge_info.num_objs >= SYNX_MAX_OBJS)
+		return -SYNX_INVALID;
+
+	h_synxs = kcalloc(merge_info.num_objs,
+				sizeof(*h_synxs), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(h_synxs))
+		return -ENOMEM;
+
+	if (copy_from_user(h_synxs,
+			u64_to_user_ptr(merge_info.synx_objs),
+			sizeof(u32) * merge_info.num_objs)) {
+		kfree(h_synxs);
+		return -EFAULT;
+	}
+
+	params.num_objs = merge_info.num_objs;
+	params.h_synxs = h_synxs;
+	params.flags = merge_info.flags;
+	params.h_merged_obj = &merge_info.merged;
+
+	result = synx_merge(session, &params);
+	if (!result)
+		if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+				&merge_info,
+				k_ioctl->size)) {
+			kfree(h_synxs);
+			return -EFAULT;
+		}
+
+	kfree(h_synxs);
+	return result;
+}
+
+static int synx_handle_wait(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_wait_v2 wait_info;
+
+	if (k_ioctl->size != sizeof(wait_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&wait_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	k_ioctl->result = synx_wait(session,
+		wait_info.synx_obj, wait_info.timeout_ms);
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_handle_async_wait(
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int rc = 0;
+	struct synx_userpayload_info_v2 user_data;
+	struct synx_callback_params params = {0};
+
+	if (k_ioctl->size != sizeof(user_data))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&user_data,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	params.h_synx = user_data.synx_obj;
+	params.cb_func = synx_util_default_user_callback;
+	params.userdata = (void *)user_data.payload[0];
+
+	rc = synx_async_wait(session, &params);
+	if (rc)
+		dprintk(SYNX_ERR,
+			"user cb registration failed for handle %d\n",
+			user_data.synx_obj);
+
+	return rc;
+}
+
+static int synx_handle_cancel_async_wait(
+	struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	int rc = 0;
+	struct synx_userpayload_info_v2 user_data;
+	struct synx_callback_params params = {0};
+
+	if (k_ioctl->size != sizeof(user_data))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&user_data,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	params.h_synx = user_data.synx_obj;
+	params.cb_func = synx_util_default_user_callback;
+	params.userdata = (void *)user_data.payload[0];
+
+	rc = synx_cancel_async_wait(session, &params);
+	if (rc)
+		dprintk(SYNX_ERR,
+			"user cb deregistration failed for handle %d\n",
+			user_data.synx_obj);
+
+	return rc;
+}
+
+static int synx_handle_bind(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_bind_v2 synx_bind_info;
+
+	if (k_ioctl->size != sizeof(synx_bind_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&synx_bind_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	k_ioctl->result = synx_bind(session,
+		synx_bind_info.synx_obj,
+		synx_bind_info.ext_sync_desc);
+
+	return k_ioctl->result;
+}
+
+static int synx_handle_release(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_info release_info;
+
+	if (k_ioctl->size != sizeof(release_info))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&release_info,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	return synx_release(session, release_info.synx_obj);
+}
+
+static int synx_handle_get_fence(struct synx_private_ioctl_arg *k_ioctl,
+	struct synx_session *session)
+{
+	struct synx_fence_fd fence_fd;
+	struct dma_fence *fence;
+
+	if (k_ioctl->size != sizeof(fence_fd))
+		return -SYNX_INVALID;
+
+	if (copy_from_user(&fence_fd,
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			k_ioctl->size))
+		return -EFAULT;
+
+	fence = synx_get_fence(session, fence_fd.synx_obj);
+	fence_fd.fd = synx_create_sync_fd(fence);
+	/*
+	 * release the additional reference taken in synx_get_fence.
+	 * that reference ensures the fence remains valid and does
+	 * not race with handle/fence release.
+	 */
+	dma_fence_put(fence);
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&fence_fd, k_ioctl->size))
+		return -EFAULT;
+
+	return SYNX_SUCCESS;
+}
+
+static long synx_ioctl(struct file *filep,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	s32 rc = 0;
+	struct synx_private_ioctl_arg k_ioctl;
+	struct synx_session *session = filep->private_data;
+
+	if (cmd != SYNX_PRIVATE_IOCTL_CMD) {
+		dprintk(SYNX_ERR, "invalid ioctl cmd\n");
+		return -ENOIOCTLCMD;
+	}
+
+	if (copy_from_user(&k_ioctl,
+			(struct synx_private_ioctl_arg *)arg,
+			sizeof(k_ioctl))) {
+		dprintk(SYNX_ERR, "invalid ioctl args\n");
+		return -EFAULT;
+	}
+
+	if (!k_ioctl.ioctl_ptr)
+		return -SYNX_INVALID;
+
+	dprintk(SYNX_VERB, "[sess :%llu] Enter cmd %u from pid %d\n",
+		((struct synx_client *)session)->id,
+		k_ioctl.id, current->pid);
+
+	switch (k_ioctl.id) {
+	case SYNX_CREATE:
+		rc = synx_handle_create(&k_ioctl, session);
+		break;
+	case SYNX_RELEASE:
+		rc = synx_handle_release(&k_ioctl, session);
+		break;
+	case SYNX_REGISTER_PAYLOAD:
+		rc = synx_handle_async_wait(&k_ioctl,
+				session);
+		break;
+	case SYNX_DEREGISTER_PAYLOAD:
+		rc = synx_handle_cancel_async_wait(&k_ioctl,
+				session);
+		break;
+	case SYNX_SIGNAL:
+		rc = synx_handle_signal(&k_ioctl, session);
+		break;
+	case SYNX_MERGE:
+		rc = synx_handle_merge(&k_ioctl, session);
+		break;
+	case SYNX_WAIT:
+		rc = synx_handle_wait(&k_ioctl, session);
+		if (copy_to_user((void *)arg,
+			&k_ioctl,
+			sizeof(k_ioctl))) {
+			dprintk(SYNX_ERR, "invalid ioctl args\n");
+			rc = -EFAULT;
+		}
+		break;
+	case SYNX_BIND:
+		rc = synx_handle_bind(&k_ioctl, session);
+		break;
+	case SYNX_GETSTATUS:
+		rc = synx_handle_getstatus(&k_ioctl, session);
+		break;
+	case SYNX_IMPORT:
+		rc = synx_handle_import(&k_ioctl, session);
+		break;
+	case SYNX_IMPORT_ARR:
+		rc = synx_handle_import_arr(&k_ioctl, session);
+		break;
+	case SYNX_EXPORT:
+		rc = synx_handle_export(&k_ioctl, session);
+		break;
+	case SYNX_GETFENCE_FD:
+		rc = synx_handle_get_fence(&k_ioctl, session);
+		break;
+	default:
+		rc = -SYNX_INVALID;
+	}
+
+	dprintk(SYNX_VERB, "[sess :%llu] exit with status %d\n",
+		((struct synx_client *)session)->id, rc);
+
+	return rc;
+}
+
+static ssize_t synx_read(struct file *filep,
+	char __user *buf, size_t size, loff_t *f_pos)
+{
+	ssize_t rc = 0;
+	struct synx_client *client = NULL;
+	struct synx_client_cb *cb;
+	struct synx_session *session = filep->private_data;
+	struct synx_userpayload_info_v2 data;
+
+	if (size != sizeof(struct synx_userpayload_info_v2)) {
+		dprintk(SYNX_ERR, "invalid read size\n");
+		return -SYNX_INVALID;
+	}
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client))
+		return -SYNX_INVALID;
+
+	mutex_lock(&client->event_q_lock);
+	cb = list_first_entry_or_null(&client->event_q,
+			struct synx_client_cb, node);
+	if (IS_ERR_OR_NULL(cb)) {
+		mutex_unlock(&client->event_q_lock);
+		rc = 0;
+		goto fail;
+	}
+
+	if (cb->idx == 0 || cb->idx >= SYNX_MAX_OBJS) {
+		dprintk(SYNX_ERR, "invalid index\n");
+		mutex_unlock(&client->event_q_lock);
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	list_del_init(&cb->node);
+	mutex_unlock(&client->event_q_lock);
+
+	rc = size;
+	data.synx_obj = cb->kernel_cb.h_synx;
+	data.reserved = cb->kernel_cb.status;
+	data.payload[0] = (u64)cb->kernel_cb.data;
+	if (copy_to_user(buf,
+			&data,
+			sizeof(struct synx_userpayload_info_v2))) {
+		dprintk(SYNX_ERR, "couldn't copy user callback data\n");
+		rc = -EFAULT;
+	}
+
+	if (synx_util_clear_cb_entry(client, cb))
+		dprintk(SYNX_ERR,
+			"[sess :%llu] error clearing cb for handle %u\n",
+			client->id, data.synx_obj);
+fail:
+	synx_put_client(client);
+	return rc;
+}
+
+static unsigned int synx_poll(struct file *filep,
+	struct poll_table_struct *poll_table)
+{
+	int rc = 0;
+	struct synx_client *client;
+	struct synx_session *session = filep->private_data;
+
+	client = synx_get_client(session);
+	if (IS_ERR_OR_NULL(client)) {
+		dprintk(SYNX_ERR, "invalid session in poll\n");
+		return SYNX_SUCCESS;
+	}
+
+	poll_wait(filep, &client->event_wq, poll_table);
+	mutex_lock(&client->event_q_lock);
+	if (!list_empty(&client->event_q))
+		rc = POLLPRI;
+	mutex_unlock(&client->event_q_lock);
+
+	synx_put_client(client);
+	return rc;
+}
+
+struct synx_session *synx_initialize(
+	struct synx_initialization_params *params)
+{
+	struct synx_client *client;
+
+	if (IS_ERR_OR_NULL(params))
+		return ERR_PTR(-SYNX_INVALID);
+
+	client = vzalloc(sizeof(*client));
+	if (IS_ERR_OR_NULL(client))
+		return ERR_PTR(-SYNX_NOMEM);
+
+	if (params->name)
+		strlcpy(client->name, params->name, sizeof(client->name));
+
+	client->active = true;
+	client->dma_context = dma_fence_context_alloc(1);
+	client->id = atomic64_inc_return(&synx_counter);
+	kref_init(&client->refcount);
+	spin_lock_init(&client->handle_map_lock);
+	mutex_init(&client->event_q_lock);
+	INIT_LIST_HEAD(&client->event_q);
+	init_waitqueue_head(&client->event_wq);
+	/* zero idx not allowed */
+	set_bit(0, client->cb_bitmap);
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	hash_add(synx_dev->native->client_metadata_map,
+		&client->node, (u64)client);
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+
+	dprintk(SYNX_INFO, "[sess :%llu] session created %s\n",
+		client->id, params->name);
+
+	return (struct synx_session *)client;
+}
+EXPORT_SYMBOL(synx_initialize);
+
+int synx_uninitialize(struct synx_session *session)
+{
+	struct synx_client *client = NULL, *curr;
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	hash_for_each_possible(synx_dev->native->client_metadata_map,
+			curr, node, (u64)session) {
+		if (curr == (struct synx_client *)session) {
+			if (curr->active) {
+				curr->active = false;
+				client = curr;
+			}
+			break;
+		}
+	}
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+
+	/* release the reference obtained at synx init */
+	synx_put_client(client);
+	return SYNX_SUCCESS;
+}
+EXPORT_SYMBOL(synx_uninitialize);
+
+static int synx_open(struct inode *inode, struct file *filep)
+{
+	int rc = 0;
+	char name[SYNX_OBJ_NAME_LEN];
+	struct synx_initialization_params params = {0};
+
+	dprintk(SYNX_VERB, "Enter pid: %d\n", current->pid);
+
+	scnprintf(name, SYNX_OBJ_NAME_LEN, "umd-client-%d", current->pid);
+	params.name = name;
+	params.id = SYNX_CLIENT_NATIVE;
+
+	filep->private_data = synx_initialize(&params);
+	if (IS_ERR_OR_NULL(filep->private_data)) {
+		dprintk(SYNX_ERR, "session allocation failed for pid: %d\n",
+			current->pid);
+		rc = PTR_ERR(filep->private_data);
+	} else {
+		dprintk(SYNX_VERB, "allocated new session for pid: %d\n",
+			current->pid);
+	}
+
+	return rc;
+}
+
+static int synx_close(struct inode *inode, struct file *filep)
+{
+	struct synx_session *session = filep->private_data;
+
+	return synx_uninitialize(session);
+}
+
+static const struct file_operations synx_fops = {
+	.owner = THIS_MODULE,
+	.open  = synx_open,
+	.read  = synx_read,
+	.release = synx_close,
+	.poll  = synx_poll,
+	.unlocked_ioctl = synx_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = synx_ioctl,
+#endif
+};
+
+int synx_register_ops(
+	const struct synx_register_params *params)
+{
+	s32 rc = 0;
+	struct synx_registered_ops *client_ops;
+
+	if (!synx_dev || !params || !params->name ||
+		 !synx_util_is_valid_bind_type(params->type) ||
+		 !params->ops.register_callback ||
+		 !params->ops.deregister_callback ||
+		 !params->ops.signal) {
+		dprintk(SYNX_ERR, "invalid register params\n");
+		return -SYNX_INVALID;
+	}
+
+	mutex_lock(&synx_dev->vtbl_lock);
+	client_ops = &synx_dev->bind_vtbl[params->type];
+	if (!client_ops->valid) {
+		client_ops->valid = true;
+		memcpy(&client_ops->ops, &params->ops,
+			sizeof(client_ops->ops));
+		strlcpy(client_ops->name, params->name,
+			sizeof(client_ops->name));
+		client_ops->type = params->type;
+		dprintk(SYNX_INFO,
+			"registered bind ops type %u for %s\n",
+			params->type, params->name);
+	} else {
+		dprintk(SYNX_WARN,
+			"client already registered for type %u by %s\n",
+			client_ops->type, client_ops->name);
+		rc = -SYNX_ALREADY;
+	}
+	mutex_unlock(&synx_dev->vtbl_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(synx_register_ops);
+
+int synx_deregister_ops(
+	const struct synx_register_params *params)
+{
+	struct synx_registered_ops *client_ops;
+
+	if (IS_ERR_OR_NULL(params) || !params->name ||
+		!synx_util_is_valid_bind_type(params->type)) {
+		dprintk(SYNX_ERR, "invalid params\n");
+		return -SYNX_INVALID;
+	}
+
+	mutex_lock(&synx_dev->vtbl_lock);
+	client_ops = &synx_dev->bind_vtbl[params->type];
+	memset(client_ops, 0, sizeof(*client_ops));
+	dprintk(SYNX_INFO, "deregistered bind ops for %s\n",
+		params->name);
+	mutex_unlock(&synx_dev->vtbl_lock);
+
+	return SYNX_SUCCESS;
+}
+EXPORT_SYMBOL(synx_deregister_ops);
+
+void synx_ipc_handler(struct work_struct *cb_dispatch)
+{
+	struct synx_signal_cb *signal_cb =
+		container_of(cb_dispatch, struct synx_signal_cb, cb_dispatch);
+	struct synx_map_entry *map_entry;
+
+	map_entry = synx_util_get_map_entry(signal_cb->handle);
+	if (IS_ERR_OR_NULL(map_entry)) {
+		dprintk(SYNX_WARN,
+			"no clients to notify for %u\n",
+			signal_cb->handle);
+		dprintk(SYNX_MEM, "signal cb destroyed %pK\n", signal_cb);
+		kfree(signal_cb);
+		return;
+	}
+
+	/* get reference on synx coredata for signal cb */
+	synx_util_get_object(map_entry->synx_obj);
+	signal_cb->synx_obj = map_entry->synx_obj;
+	synx_util_release_map_entry(map_entry);
+	synx_signal_handler(&signal_cb->cb_dispatch);
+}
+
+int synx_ipc_callback(u32 client_id,
+	s64 data, void *priv)
+{
+	struct synx_signal_cb *signal_cb;
+	u32 status = (u32)data;
+	u32 handle = (u32)(data >> 32);
+
+	signal_cb = kzalloc(sizeof(*signal_cb), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(signal_cb))
+		return -SYNX_NOMEM;
+
+	dprintk(SYNX_INFO,
+		"signal notification for %u received with status %u\n",
+		handle, status);
+
+	signal_cb->status = status;
+	signal_cb->handle = handle;
+	signal_cb->flag = SYNX_SIGNAL_FROM_IPC;
+
+	INIT_WORK(&signal_cb->cb_dispatch, synx_ipc_handler);
+	queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch);
+
+	return SYNX_SUCCESS;
+}
+EXPORT_SYMBOL(synx_ipc_callback);
+
+int synx_recover(enum synx_client_id id)
+{
+	u32 core_id;
+
+	core_id = synx_util_map_client_id_to_core(id);
+	if (core_id >= SYNX_CORE_MAX) {
+		dprintk(SYNX_ERR, "invalid client id %u\n", id);
+		return -SYNX_INVALID;
+	}
+
+	switch (core_id) {
+	case SYNX_CORE_EVA:
+	case SYNX_CORE_IRIS:
+		break;
+	default:
+		dprintk(SYNX_ERR, "recovery not supported on %u\n", id);
+		return -SYNX_NOSUPPORT;
+	}
+
+	return synx_global_recover(core_id);
+}
+EXPORT_SYMBOL(synx_recover);
+
+static int synx_local_mem_init(void)
+{
+	if (!synx_dev->native)
+		return -SYNX_INVALID;
+
+	hash_init(synx_dev->native->client_metadata_map);
+	hash_init(synx_dev->native->fence_map);
+	hash_init(synx_dev->native->global_map);
+	hash_init(synx_dev->native->local_map);
+	hash_init(synx_dev->native->csl_fence_map);
+
+	spin_lock_init(&synx_dev->native->metadata_map_lock);
+	spin_lock_init(&synx_dev->native->fence_map_lock);
+	spin_lock_init(&synx_dev->native->global_map_lock);
+	spin_lock_init(&synx_dev->native->local_map_lock);
+	spin_lock_init(&synx_dev->native->csl_map_lock);
+
+	/* zero idx not allowed */
+	set_bit(0, synx_dev->native->bitmap);
+	return 0;
+}
+
+static int synx_cdsp_restart_notifier(struct notifier_block *nb,
+	unsigned long code, void *data)
+{
+	struct synx_cdsp_ssr *cdsp_ssr = &synx_dev->cdsp_ssr;
+
+	if (&cdsp_ssr->nb != nb) {
+		dprintk(SYNX_ERR, "Invalid SSR Notifier block\n");
+		return NOTIFY_BAD;
+	}
+
+	switch (code) {
+	case QCOM_SSR_BEFORE_SHUTDOWN:
+		break;
+	case QCOM_SSR_AFTER_SHUTDOWN:
+		if (cdsp_ssr->ssrcnt != 0) {
+			dprintk(SYNX_INFO, "Cleaning up global memory\n");
+			synx_global_recover(SYNX_CORE_NSP);
+		}
+		break;
+	case QCOM_SSR_BEFORE_POWERUP:
+		break;
+	case QCOM_SSR_AFTER_POWERUP:
+		dprintk(SYNX_DBG, "CDSP is up");
+		if (cdsp_ssr->ssrcnt == 0)
+			cdsp_ssr->ssrcnt++;
+		break;
+	default:
+		dprintk(SYNX_ERR, "Unknown status code for CDSP SSR\n");
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int __init synx_init(void)
+{
+	int rc;
+
+	dprintk(SYNX_INFO, "device initialization start\n");
+
+	synx_dev = kzalloc(sizeof(*synx_dev), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_dev))
+		return -SYNX_NOMEM;
+
+	rc = alloc_chrdev_region(&synx_dev->dev, 0, 1, SYNX_DEVICE_NAME);
+	if (rc < 0) {
+		dprintk(SYNX_ERR, "region allocation failed\n");
+		goto alloc_fail;
+	}
+
+	cdev_init(&synx_dev->cdev, &synx_fops);
+	synx_dev->cdev.owner = THIS_MODULE;
+	rc = cdev_add(&synx_dev->cdev, synx_dev->dev, 1);
+	if (rc < 0) {
+		dprintk(SYNX_ERR, "device registation failed\n");
+		goto reg_fail;
+	}
+
+	synx_dev->class = class_create(THIS_MODULE, SYNX_DEVICE_NAME);
+	device_create(synx_dev->class, NULL, synx_dev->dev,
+		NULL, SYNX_DEVICE_NAME);
+
+	synx_dev->wq_cb = alloc_workqueue(SYNX_WQ_CB_NAME,
+		WQ_HIGHPRI | WQ_UNBOUND, SYNX_WQ_CB_THREADS);
+	synx_dev->wq_cleanup = alloc_workqueue(SYNX_WQ_CLEANUP_NAME,
+		WQ_HIGHPRI | WQ_UNBOUND, SYNX_WQ_CLEANUP_THREADS);
+	if (!synx_dev->wq_cb || !synx_dev->wq_cleanup) {
+		dprintk(SYNX_ERR,
+			"high priority work queue creation failed\n");
+		rc = -SYNX_INVALID;
+		goto fail;
+	}
+
+	synx_dev->native = vzalloc(sizeof(*synx_dev->native));
+	if (IS_ERR_OR_NULL(synx_dev->native)) {
+		rc = -SYNX_NOMEM;
+		goto fail;
+	}
+
+	mutex_init(&synx_dev->vtbl_lock);
+	mutex_init(&synx_dev->error_lock);
+	INIT_LIST_HEAD(&synx_dev->error_list);
+	synx_dev->debugfs_root = synx_init_debugfs_dir(synx_dev);
+
+	rc = synx_global_mem_init();
+	if (rc) {
+		dprintk(SYNX_ERR, "shared mem init failed, err=%d\n", rc);
+		goto err;
+	}
+
+	synx_dev->cdsp_ssr.ssrcnt = 0;
+	synx_dev->cdsp_ssr.nb.notifier_call = synx_cdsp_restart_notifier;
+	synx_dev->cdsp_ssr.handle =
+		qcom_register_ssr_notifier("cdsp", &synx_dev->cdsp_ssr.nb);
+	if (IS_ERR_OR_NULL(synx_dev->cdsp_ssr.handle)) {
+		dprintk(SYNX_ERR, "SSR registration failed\n");
+		rc = -SYNX_INVALID;
+		goto err;
+	}
+
+	ipclite_register_client(synx_ipc_callback, NULL);
+	synx_local_mem_init();
+
+	dprintk(SYNX_INFO, "device initialization success\n");
+
+	return 0;
+
+err:
+	vfree(synx_dev->native);
+fail:
+	device_destroy(synx_dev->class, synx_dev->dev);
+	class_destroy(synx_dev->class);
+reg_fail:
+	unregister_chrdev_region(synx_dev->dev, 1);
+alloc_fail:
+	kfree(synx_dev);
+	synx_dev = NULL;
+	return rc;
+}
+
+static void __exit synx_exit(void)
+{
+	struct error_node *err_node, *err_node_tmp;
+
+	flush_workqueue(synx_dev->wq_cb);
+	flush_workqueue(synx_dev->wq_cleanup);
+	device_destroy(synx_dev->class, synx_dev->dev);
+	class_destroy(synx_dev->class);
+	cdev_del(&synx_dev->cdev);
+	unregister_chrdev_region(synx_dev->dev, 1);
+	synx_remove_debugfs_dir(synx_dev);
+	/* release uncleared error nodes */
+	list_for_each_entry_safe(
+			err_node, err_node_tmp,
+			&synx_dev->error_list,
+			node) {
+		list_del(&err_node->node);
+		kfree(err_node);
+	}
+	mutex_destroy(&synx_dev->vtbl_lock);
+	mutex_destroy(&synx_dev->error_lock);
+	vfree(synx_dev->native);
+	kfree(synx_dev);
+}
+
+module_init(synx_init);
+module_exit(synx_exit);
+
+MODULE_DESCRIPTION("Global Synx Driver");
+MODULE_LICENSE("GPL v2");

+ 542 - 0
msm/synx/synx_api.h

@@ -0,0 +1,542 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_API_H__
+#define __SYNX_API_H__
+
+#include <linux/list.h>
+#include <synx_header.h>
+
+#include "synx_err.h"
+
+/**
+ * enum synx_create_flags - Flags passed during synx_create call
+ *
+ * SYNX_CREATE_LOCAL_FENCE  : Instructs the framework to create local synx object
+ * SYNX_CREATE_GLOBAL_FENCE : Instructs the framework to create global synx object
+ * SYNX_CREATE_DMA_FENCE    : Create a synx object by wrapping the provided dma fence.
+ *                            Need to pass the dma_fence ptr through fence variable
+ *                            if this flag is set.
+ * SYNX_CREATE_CSL_FENCE    : Create a synx object with provided csl fence.
+ *                            Establishes interop with the csl fence through
+ *                            bind operations.
+ */
+enum synx_create_flags {
+	SYNX_CREATE_LOCAL_FENCE  = 0x01,
+	SYNX_CREATE_GLOBAL_FENCE = 0x02,
+	SYNX_CREATE_DMA_FENCE    = 0x04,
+	SYNX_CREATE_CSL_FENCE    = 0x08,
+	SYNX_CREATE_MAX_FLAGS    = 0x10,
+};
+
+/**
+ * enum synx_init_flags - Session initialization flag
+ */
+enum synx_init_flags {
+	SYNX_INIT_MAX = 0x01,
+};
+
+/**
+ * enum synx_import_flags - Import flags
+ *
+ * SYNX_IMPORT_LOCAL_FENCE  : Instructs the framework to create local synx object
+ * SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object
+ * SYNX_IMPORT_SYNX_FENCE   : Import native Synx handle for synchronization
+ *                            Need to pass the Synx handle ptr through fence variable
+ *                            if this flag is set.
+ * SYNX_IMPORT_DMA_FENCE    : Import dma fence and create Synx handle for interop
+ *                            Need to pass the dma_fence ptr through fence variable
+ *                            if this flag is set.
+ * SYNX_IMPORT_EX_RELEASE   : Flag indicating a relaxed import, where the client
+ *                            need not call release on this handle after import.
+ */
+enum synx_import_flags {
+	SYNX_IMPORT_LOCAL_FENCE  = 0x01,
+	SYNX_IMPORT_GLOBAL_FENCE = 0x02,
+	SYNX_IMPORT_SYNX_FENCE   = 0x04,
+	SYNX_IMPORT_DMA_FENCE    = 0x08,
+	SYNX_IMPORT_EX_RELEASE   = 0x10,
+};
+
+/**
+ * enum synx_signal_status - Signal status
+ *
+ * SYNX_STATE_SIGNALED_SUCCESS : Signal success
+ * SYNX_STATE_SIGNALED_CANCEL  : Signal cancellation
+ * SYNX_STATE_SIGNALED_MAX     : Clients can send custom notification
+ *                               beyond the max value (only positive)
+ */
+enum synx_signal_status {
+	SYNX_STATE_SIGNALED_SUCCESS = 2,
+	SYNX_STATE_SIGNALED_CANCEL  = 4,
+	SYNX_STATE_SIGNALED_MAX     = 64,
+};
+
+/**
+ * synx_callback - Callback invoked by external fence
+ *
+ * The external fence dispatches the registered callback to notify
+ * the synx framework of the signal.
+ */
+typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
+
+/**
+ * synx_user_callback - Callback function registered by clients
+ *
+ * User callback registered for non-blocking wait. Dispatched when
+ * synx object is signaled.
+ */
+typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data);
+
+/**
+ * struct bind_operations - Function pointers that need to be defined
+ *    to achieve bind functionality for external fence with synx obj
+ *
+ * @register_callback   : Function to register with external sync object
+ * @deregister_callback : Function to deregister with external sync object
+ * @enable_signaling    : Function to enable the signaling on the external
+ *                        sync object (optional)
+ * @signal              : Function to signal the external sync object
+ */
+struct bind_operations {
+	int (*register_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*deregister_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*enable_signaling)(s32 sync_obj);
+	int (*signal)(s32 sync_obj, u32 status);
+};
+
+/**
+ * synx_bind_client_type : External fence supported for bind
+ *
+ * SYNX_TYPE_CSL : Camera CSL fence
+ */
+enum synx_bind_client_type {
+	SYNX_TYPE_CSL = 0,
+	SYNX_MAX_BIND_TYPES,
+};
+
+/**
+ * struct synx_register_params - External registration parameters
+ *
+ * @ops  : Bind operations struct
+ * @name : External client name
+ *         Only first 64 bytes are accepted, rest will be ignored
+ * @type : Synx bind client type
+ */
+struct synx_register_params {
+	struct bind_operations ops;
+	char *name;
+	enum synx_bind_client_type type;
+};
+
+/**
+ * struct synx_queue_desc - Memory descriptor of the queue allocated by
+ *                           the fence driver for each client during
+ *                           register.
+ *
+ * @vaddr    : CPU virtual address of the queue.
+ * @dev_addr : Physical address of the memory object.
+ * @size     : Size of the memory.
+ * @mem_data : Internal pointer with the attributes of the allocation.
+ */
+struct synx_queue_desc {
+	void *vaddr;
+	u64 dev_addr;
+	u64 size;
+	void *mem_data;
+};
+
+/**
+ * enum synx_client_id : Unique identifier of the supported clients
+ *
+ * @SYNX_CLIENT_NATIVE   : Native Client
+ * @SYNX_CLIENT_GFX_CTX0 : GFX Client 0
+ * @SYNX_CLIENT_DPU_CTL0 : DPU Client 0
+ * @SYNX_CLIENT_DPU_CTL1 : DPU Client 1
+ * @SYNX_CLIENT_DPU_CTL2 : DPU Client 2
+ * @SYNX_CLIENT_DPU_CTL3 : DPU Client 3
+ * @SYNX_CLIENT_DPU_CTL4 : DPU Client 4
+ * @SYNX_CLIENT_DPU_CTL5 : DPU Client 5
+ * @SYNX_CLIENT_EVA_CTX0 : EVA Client 0
+ * @SYNX_CLIENT_VID_CTX0 : Video Client 0
+ * @SYNX_CLIENT_NSP_CTX0 : NSP Client 0
+ * @SYNX_CLIENT_IFE_CTX0 : IFE Client 0
+ */
+enum synx_client_id {
+	SYNX_CLIENT_NATIVE = 0,
+	SYNX_CLIENT_GFX_CTX0,
+	SYNX_CLIENT_DPU_CTL0,
+	SYNX_CLIENT_DPU_CTL1,
+	SYNX_CLIENT_DPU_CTL2,
+	SYNX_CLIENT_DPU_CTL3,
+	SYNX_CLIENT_DPU_CTL4,
+	SYNX_CLIENT_DPU_CTL5,
+	SYNX_CLIENT_EVA_CTX0,
+	SYNX_CLIENT_VID_CTX0,
+	SYNX_CLIENT_NSP_CTX0,
+	SYNX_CLIENT_IFE_CTX0,
+	SYNX_CLIENT_MAX,
+};
+
+/**
+ * struct synx_session - Client session identifier
+ *
+ * @type   : Session type
+ * @client : Pointer to client session
+ */
+struct synx_session {
+	u32 type;
+	void *client;
+};
+
+/**
+ * struct synx_initialization_params - Session params
+ *
+ * @name  : Client session name
+ *          Only first 64 bytes are accepted, rest will be ignored
+ * @ptr   : Pointer to queue descriptor (filled by function)
+ * @id    : Client identifier
+ * @flags : Synx initialization flags
+ */
+struct synx_initialization_params {
+	const char *name;
+	struct synx_queue_desc *ptr;
+	enum synx_client_id id;
+	enum synx_init_flags flags;
+};
+
+/**
+ * struct synx_create_params - Synx creation parameters
+ *
+ * @name     : Optional parameter associating a name with the synx
+ *             object for debug purposes
+ *             Only first 64 bytes are accepted,
+ *             rest will be ignored
+ * @h_synx   : Pointer to synx object handle (filled by function)
+ * @fence    : Pointer to external fence
+ * @flags    : Synx flags for customization (mentioned below)
+ *
+ * SYNX_CREATE_GLOBAL_FENCE - Hints the framework to create global synx object
+ *     If flag not set, hints framework to create a local synx object.
+ * SYNX_CREATE_DMA_FENCE - Wrap synx object with dma fence.
+ *     Need to pass the dma_fence ptr through 'fence' variable if this flag is set.
+ * SYNX_CREATE_CSL_FENCE - Create a synx object with the provided external (CSL) fence.
+ *     Establishes interop with supported external fence through bind operations.
+ *     Need to fill synx_external_desc structure if this flag is set.
+ */
+
+struct synx_create_params {
+	const char *name;
+	u32 *h_synx;
+	void *fence;
+	enum synx_create_flags flags;
+};
+
+/**
+ * enum synx_merge_flags - Handle merge flags
+ *
+ * SYNX_MERGE_LOCAL_FENCE   : Create local composite object.
+ * SYNX_MERGE_GLOBAL_FENCE  : Create global composite object.
+ * SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects
+ * SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object
+ */
+enum synx_merge_flags {
+	SYNX_MERGE_LOCAL_FENCE   = 0x01,
+	SYNX_MERGE_GLOBAL_FENCE  = 0x02,
+	SYNX_MERGE_NOTIFY_ON_ALL = 0x04,
+	SYNX_MERGE_NOTIFY_ON_ANY = 0x08,
+};
+
+/*
+ * struct synx_merge_params - Synx merge parameters
+ *
+ * @h_synxs      : Pointer to an array of synx handles to be merged
+ * @flags        : Merge flags
+ * @num_objs     : Number of synx objs in the block
+ * @h_merged_obj : Merged synx object handle (filled by function)
+ */
+struct synx_merge_params {
+	u32 *h_synxs;
+	enum synx_merge_flags flags;
+	u32 num_objs;
+	u32 *h_merged_obj;
+};
+
+/**
+ * enum synx_import_type - Import type
+ *
+ * SYNX_IMPORT_INDV_PARAMS : Import filled with synx_import_indv_params struct
+ * SYNX_IMPORT_ARR_PARAMS  : Import filled with synx_import_arr_params struct
+ */
+enum synx_import_type {
+	SYNX_IMPORT_INDV_PARAMS = 0x01,
+	SYNX_IMPORT_ARR_PARAMS  = 0x02,
+};
+
+/**
+ * struct synx_import_indv_params - Synx import indv parameters
+ *
+ * @new_h_synx  : Pointer to new synx object
+ *                (filled by the function)
+ *                The new handle/s should be used by importing
+ *                process for all synx api operations and
+ *                for sharing with FW cores.
+ * @flags       : Synx flags
+ * @fence       : Pointer to external fence
+ */
+struct synx_import_indv_params {
+	u32 *new_h_synx;
+	enum synx_import_flags flags;
+	void *fence;
+};
+
+/**
+ * struct synx_import_arr_params - Synx import arr parameters
+ *
+ * @list        : Array of synx_import_indv_params pointers
+ * @num_fences  : Number of fences passed to the framework
+ */
+struct synx_import_arr_params {
+	struct synx_import_indv_params *list;
+	u32 num_fences;
+};
+
+/**
+ * struct synx_import_params - Synx import parameters
+ *
+ * @type : Import params type filled by client
+ * @indv : Params to import an individual handle/fence
+ * @arr  : Params to import an array of handles/fences
+ */
+struct synx_import_params {
+	enum synx_import_type type;
+	union {
+		struct synx_import_indv_params indv;
+		struct synx_import_arr_params  arr;
+	};
+};
+
+/**
+ * struct synx_callback_params - Synx callback parameters
+ *
+ * @h_synx         : Synx object handle
+ * @cb_func        : Pointer to callback func to be invoked
+ * @userdata       : Opaque pointer passed back with callback
+ * @cancel_cb_func : Pointer to callback to ack cancellation (optional)
+ */
+struct synx_callback_params {
+	u32 h_synx;
+	synx_user_callback_t cb_func;
+	void *userdata;
+	synx_user_callback_t cancel_cb_func;
+};
+
+/* Kernel APIs */
+
+/* synx_register_ops - Register operations for external synchronization
+ *
+ * Register with synx for enabling external synchronization through bind
+ *
+ * @param params : Pointer to register params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_INVALID will be returned if params are invalid.
+ * -SYNX_NOMEM will be returned if bind ops cannot be registered due to
+ * insufficient memory.
+ * -SYNX_ALREADY will be returned if type already in use.
+ */
+int synx_register_ops(const struct synx_register_params *params);
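Illustrative sketch (editor's note, not part of this patch) of how an external fence provider might fill struct synx_register_params before calling synx_register_ops; the my_* callbacks and the provider name are hypothetical, and register_callback, deregister_callback and signal must all be non-NULL as checked in synx_register_ops above:

	static int my_register_cb(synx_callback cb_func, void *userdata, s32 sync_obj)
	{
		/* hook cb_func/userdata up to the external fence sync_obj */
		return 0;
	}

	static int my_deregister_cb(synx_callback cb_func, void *userdata, s32 sync_obj)
	{
		return 0;
	}

	static int my_signal(s32 sync_obj, u32 status)
	{
		/* propagate the signal to the external fence */
		return 0;
	}

	struct synx_register_params reg = {
		.name = "my-csl-provider",	/* hypothetical provider name */
		.type = SYNX_TYPE_CSL,
		.ops = {
			.register_callback   = my_register_cb,
			.deregister_callback = my_deregister_cb,
			.signal              = my_signal,
		},
	};

	rc = synx_register_ops(&reg);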
+
+/**
+ * synx_deregister_ops - De-register external synchronization operations
+ *
+ * @param params : Pointer to register params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_INVALID will be returned if record not found.
+ */
+int synx_deregister_ops(const struct synx_register_params *params);
+
+/**
+ * synx_initialize - Initializes a new client session
+ *
+ * @param params : Pointer to session init params
+ *
+ * @return Client session pointer on success. NULL or error in case of failure.
+ */
+struct synx_session *synx_initialize(struct synx_initialization_params *params);
+
+/**
+ * synx_uninitialize - Destroys the client session
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ */
+int synx_uninitialize(struct synx_session *session);
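A minimal session lifecycle sketch (editor's note), mirroring what synx_open() above does for the userspace path; the client name is arbitrary:

	struct synx_initialization_params params = {0};
	struct synx_session *session;

	params.name = "my-kernel-client";	/* hypothetical client name */
	params.id = SYNX_CLIENT_NATIVE;

	session = synx_initialize(&params);
	if (IS_ERR_OR_NULL(session))
		return -SYNX_INVALID;

	/* ... create, signal and wait on handles using this session ... */

	synx_uninitialize(session);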
+
+/**
+ * synx_create - Creates a synx object
+ *
+ *  Creates a new synx obj and returns the handle to client.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Pointer to create params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_INVALID will be returned if params were invalid.
+ * -SYNX_NOMEM will be returned if the kernel can't allocate space for
+ * synx object.
+ */
+int synx_create(struct synx_session *session, struct synx_create_params *params);
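A minimal create sketch (editor's note); `session` is assumed to come from a prior synx_initialize() call:

	u32 h_synx;
	int rc;
	struct synx_create_params params = {0};

	params.name = "my-fence";	/* optional debug name */
	params.h_synx = &h_synx;
	params.flags = SYNX_CREATE_GLOBAL_FENCE;

	rc = synx_create(session, &params);
	if (rc != SYNX_SUCCESS)
		return rc;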
+
+/**
+ * synx_async_wait - Registers a callback with a synx object
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Callback params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_INVALID will be returned if userdata is invalid.
+ * -SYNX_NOMEM will be returned if cb_func is invalid.
+ */
+int synx_async_wait(struct synx_session *session, struct synx_callback_params *params);
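A non-blocking wait sketch (editor's note); the callback follows the synx_user_callback_t signature defined above, and `priv` is a hypothetical client context pointer:

	static void my_done_cb(u32 h_synx, int status, void *data)
	{
		/* dispatched from the fence driver once h_synx is signaled */
	}

	struct synx_callback_params cb_params = {0};

	cb_params.h_synx = h_synx;
	cb_params.cb_func = my_done_cb;
	cb_params.userdata = priv;

	rc = synx_async_wait(session, &cb_params);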
+
+/**
+ * synx_cancel_async_wait - De-registers a callback with a synx object
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Callback params
+ *
+ * @return Status of operation. SYNX_SUCCESS in case of success.
+ * -SYNX_ALREADY if object has already been signaled, and cannot be cancelled.
+ * -SYNX_INVALID will be returned if userdata is invalid.
+ * -SYNX_NOMEM will be returned if cb_func is invalid.
+ */
+int synx_cancel_async_wait(struct synx_session *session,
+	struct synx_callback_params *params);
+
+/**
+ * synx_signal - Signals a synx object with the status argument.
+ *
+ * This function will signal the synx object referenced by h_synx
+ * and invoke any external binding synx objs.
+ * The status parameter will indicate whether the entity
+ * performing the signaling wants to convey an error case or a success case.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle
+ * @param status  : Status of signaling.
+ *                  Clients can send custom signaling status
+ *                  beyond SYNX_STATE_SIGNALED_MAX.
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_signal(struct synx_session *session, u32 h_synx,
+	enum synx_signal_status status);
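Signaling sketch (editor's note); the producer reports either success or an error/cancel state on a handle it owns:

	/* success case */
	rc = synx_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);

	/* error/cancel case */
	rc = synx_signal(session, h_synx, SYNX_STATE_SIGNALED_CANCEL);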
+
+/**
+ * synx_merge - Merges multiple synx objects
+ *
+ * This function will merge multiple synx objects into a synx group.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Merge params
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_merge(struct synx_session *session, struct synx_merge_params *params);
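Merge sketch (editor's note); h_synx_a and h_synx_b are assumed to be existing handles owned by this session:

	u32 handles[] = { h_synx_a, h_synx_b };
	u32 h_merged;
	struct synx_merge_params m_params = {0};

	m_params.h_synxs = handles;
	m_params.num_objs = ARRAY_SIZE(handles);
	m_params.flags = SYNX_MERGE_GLOBAL_FENCE | SYNX_MERGE_NOTIFY_ON_ALL;
	m_params.h_merged_obj = &h_merged;

	rc = synx_merge(session, &m_params);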
+
+/**
+ * synx_wait - Waits for a synx object synchronously
+ *
+ * Does a wait on the synx object identified by h_synx for a maximum
+ * of timeout_ms milliseconds. Must not be called from interrupt context as
+ * this API can sleep.
+ * Will return status if handle was signaled. Status can be from pre-defined
+ * states (enum synx_signal_status) or custom status sent by producer.
+ *
+ * @param session    : Session ptr (returned from synx_initialize)
+ * @param h_synx     : Synx object handle to be waited upon
+ * @param timeout_ms : Timeout in ms
+ *
 + * @return Signal status. -SYNX_INVALID if synx object is in bad state or arguments
+ * are invalid, -SYNX_TIMEOUT if wait times out.
+ */
+int synx_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms);
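Blocking wait sketch (editor's note); the return value is either a signal status or a negated synx error code:

	rc = synx_wait(session, h_synx, 5000 /* timeout in ms */);
	if (rc == SYNX_STATE_SIGNALED_SUCCESS) {
		/* producer signaled success; proceed */
	} else if (rc == -SYNX_TIMEOUT) {
		/* producer did not signal within the timeout */
	} else {
		/* invalid handle/arguments, or an error/custom signal status */
	}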
+
+/**
+ * synx_get_status - Returns the status of the synx object
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle
+ *
+ * @return Status of the synx object.
+ */
+int synx_get_status(struct synx_session *session, u32 h_synx);
+
+/**
+ * synx_import - Imports (looks up) synx object from given handle/fence
+ *
+ * Import subscribes the client session for notification on signal
+ * of handles/fences.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param params  : Pointer to import params
+ *
+ * @return SYNX_SUCCESS upon success, -SYNX_INVALID if synx object is in a bad state
+ */
+int synx_import(struct synx_session *session, struct synx_import_params *params);
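Import sketch (editor's note) wrapping an existing dma_fence into a synx handle; `fence` is a hypothetical struct dma_fence pointer obtained elsewhere:

	u32 h_imported;
	struct synx_import_params i_params = {0};

	i_params.type = SYNX_IMPORT_INDV_PARAMS;
	i_params.indv.fence = fence;
	i_params.indv.flags = SYNX_IMPORT_DMA_FENCE | SYNX_IMPORT_GLOBAL_FENCE;
	i_params.indv.new_h_synx = &h_imported;

	rc = synx_import(session, &i_params);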
+
+/**
+ * synx_get_fence - Get the native fence backing the synx object
+ *
+ * Function returns the native fence. Clients need to
+ * acquire & release additional reference explicitly.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle
+ *
+ * @return Fence pointer upon success, NULL or error in case of failure.
+ */
+void *synx_get_fence(struct synx_session *session, u32 h_synx);
+
+/**
+ * synx_release - Release the synx object
+ *
+ * Decrements refcount of a synx object by 1, and destroys it
+ * if the refcount becomes 0.
+ *
+ * @param session : Session ptr (returned from synx_initialize)
+ * @param h_synx  : Synx object handle to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_release(struct synx_session *session, u32 h_synx);
+
+/**
+ * synx_recover - Recover any possible handle leaks
+ *
+ * Function should be called on HW hang/reset to
+ * recover the Synx handles it shared. This cleans up
+ * the Synx handles held by the affected HW core and
+ * avoids potential resource leaks.
+ *
+ * Function does not destroy the session, but only
+ * recovers the synx handles belonging to the session.
+ * Synx session would still be active and clients
+ * need to destroy the session explicitly through
+ * synx_uninitialize API.
+ *
+ * @param id : Client ID of core to recover
+ *
+ * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
+ */
+int synx_recover(enum synx_client_id id);
+
+#endif /* __SYNX_API_H__ */

+ 145 - 0
msm/synx/synx_debugfs.c

@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "synx_api.h"
+#include "synx_debugfs.h"
+#include "synx_util.h"
+
+#define MAX_DBG_BUF_SIZE (36 * SYNX_MAX_OBJS)
+
+struct dentry *my_direc;
+
+int synx_columns = NAME_COLUMN | ID_COLUMN |
+	STATE_COLUMN | GLOBAL_COLUMN;
+EXPORT_SYMBOL(synx_columns);
+
+int synx_debug = SYNX_ERR | SYNX_WARN |
+	SYNX_INFO | SYNX_DBG;
+EXPORT_SYMBOL(synx_debug);
+
+void populate_bound_rows(
+	struct synx_coredata *row, char *cur, char *end)
+{
+	int j;
+
+	for (j = 0; j < row->num_bound_synxs; j++)
+		cur += scnprintf(cur, end - cur,
+			"\n\tID: %d",
+			row->bound_synxs[j].external_desc.id);
+}
+static ssize_t synx_table_read(struct file *file,
+		char *buf,
+		size_t count,
+		loff_t *ppos)
+{
+	struct synx_device *dev = file->private_data;
+	struct error_node *err_node, *err_node_tmp;
+	char *dbuf, *cur, *end;
+	int rc = SYNX_SUCCESS;
+	ssize_t len = 0;
+
+	dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
+	if (!dbuf)
+		return -ENOMEM;
+
+	/* dump client details */
+	cur = dbuf;
+	end = cur + MAX_DBG_BUF_SIZE;
+	if (synx_columns & NAME_COLUMN)
+		cur += scnprintf(cur, end - cur, "|   Name   |");
+	if (synx_columns & ID_COLUMN)
+		cur += scnprintf(cur, end - cur, "|    ID    |");
+	if (synx_columns & STATE_COLUMN)
+		cur += scnprintf(cur, end - cur, "|   Status   |");
+	if (synx_columns & FENCE_COLUMN)
+		cur += scnprintf(cur, end - cur, "|   Fence   |");
+	if (synx_columns & COREDATA_COLUMN)
+		cur += scnprintf(cur, end - cur, "|   Coredata |");
+	if (synx_columns & GLOBAL_COLUMN)
+		cur += scnprintf(cur, end - cur, "|   Coredata |");
+	if (synx_columns & BOUND_COLUMN)
+		cur += scnprintf(cur, end - cur, "|   Bound   |");
+	cur += scnprintf(cur, end - cur, "\n");
+
+	rc = synx_global_dump_shared_memory();
+	if (rc) {
+		cur += scnprintf(cur, end - cur,
+			"Err %d: Failed to dump global shared mem\n", rc);
+	}
+
+	if (synx_columns & ERROR_CODES && !list_empty(
+		&dev->error_list)) {
+		cur += scnprintf(
+			cur, end - cur, "\nError(s): ");
+
+		mutex_lock(&dev->error_lock);
+		list_for_each_entry_safe(
+			 err_node, err_node_tmp,
+			 &dev->error_list, node) {
+			cur += scnprintf(cur, end - cur,
+			"\n\tTime: %s - ID: %d - Code: %d",
+			err_node->timestamp,
+			err_node->h_synx,
+			err_node->error_code);
+			list_del(&err_node->node);
+			kfree(err_node);
+		}
+		mutex_unlock(&dev->error_lock);
+	}
+
+	len = simple_read_from_buffer(buf, count, ppos,
+		dbuf, cur - dbuf);
+	kfree(dbuf);
+	return len;
+}
+
+static ssize_t synx_table_write(struct file *file,
+		const char __user *buf,
+		size_t count,
+		loff_t *ppos)
+{
+	return 0;
+}
+
+static const struct file_operations synx_table_fops = {
+	.owner = THIS_MODULE,
+	.read = synx_table_read,
+	.write = synx_table_write,
+	.open = simple_open,
+};
+
+struct dentry *synx_init_debugfs_dir(struct synx_device *dev)
+{
+	struct dentry *dir = NULL;
+
+	dir = debugfs_create_dir("synx_debug", NULL);
+	if (!dir) {
+		dprintk(SYNX_ERR, "Failed to create debugfs for synx\n");
+		return NULL;
+	}
+
+	debugfs_create_u32("debug_level", 0644, dir, &synx_debug);
+	debugfs_create_u32("column_level", 0644, dir, &synx_columns);
+
+	if (!debugfs_create_file("synx_table",
+		0644, dir, dev, &synx_table_fops)) {
+		dprintk(SYNX_ERR, "Failed to create debugfs file for synx\n");
+		return NULL;
+	}
+
+	return dir;
+}
+
+void synx_remove_debugfs_dir(struct synx_device *dev)
+{
+	debugfs_remove_recursive(dev->debugfs_root);
+}

+ 94 - 0
msm/synx/synx_debugfs.h

@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_DEBUGFS_H__
+#define __SYNX_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#include "synx_private.h"
+
+enum synx_debug_level {
+	SYNX_ERR  = 0x0001,
+	SYNX_WARN = 0x0002,
+	SYNX_INFO = 0x0004,
+	SYNX_DBG  = 0x0008,
+	SYNX_VERB = 0x0010,
+	SYNX_IPCL = 0x0020,
+	SYNX_GSM  = 0x0040,
+	SYNX_MEM  = 0x0080,
+	SYNX_ALL  = SYNX_ERR | SYNX_WARN | SYNX_INFO |
+				SYNX_DBG | SYNX_IPCL | SYNX_GSM  | SYNX_MEM,
+};
+
+enum synx_columns_level {
+	NAME_COLUMN     = 0x0001,
+	ID_COLUMN       = 0x0002,
+	BOUND_COLUMN    = 0x0004,
+	STATE_COLUMN    = 0x0008,
+	FENCE_COLUMN    = 0x0010,
+	COREDATA_COLUMN = 0x0020,
+	GLOBAL_COLUMN   = 0x0040,
+	ERROR_CODES     = 0x8000,
+};
+
+#ifndef SYNX_DBG_LABEL
+#define SYNX_DBG_LABEL "synx"
+#endif
+
+#define SYNX_DBG_TAG SYNX_DBG_LABEL ": %4s: "
+
+extern int synx_debug;
+
+static inline char *synx_debug_str(int level)
+{
+	switch (level) {
+	case SYNX_ERR:
+		return "err";
+	case SYNX_WARN:
+		return "warn";
+	case SYNX_INFO:
+		return "info";
+	case SYNX_DBG:
+		return "dbg";
+	case SYNX_VERB:
+		return "verb";
+	case SYNX_IPCL:
+		return "ipcl";
+	case SYNX_GSM:
+		return "gmem";
+	case SYNX_MEM:
+		return "mem";
+	default:
+		return "???";
+	}
+}
+
+#define dprintk(__level, __fmt, arg...)                 \
+	do {                                                \
+		if (synx_debug & __level) {                     \
+			pr_info(SYNX_DBG_TAG "%s: %d: "  __fmt,     \
+				synx_debug_str(__level), __func__,      \
+				__LINE__, ## arg);                      \
+		}                                               \
+	} while (0)
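Usage sketch (editor's note): callers pass one of the synx_debug_level bits per message, and the message is printed only when that bit is set in the debugfs-controlled synx_debug mask; `h_synx` and `status` are assumed local variables:

	dprintk(SYNX_DBG, "signaling handle %u with status %u\n", h_synx, status);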
+
+/**
+ * synx_init_debugfs_dir - Initializes debugfs
+ *
+ * @param dev : Pointer to synx device structure
+ */
+struct dentry *synx_init_debugfs_dir(struct synx_device *dev);
+
+/**
+ * synx_remove_debugfs_dir - Removes debugfs
+ *
+ * @param dev : Pointer to synx device structure
+ */
+void synx_remove_debugfs_dir(struct synx_device *dev);
+
+#endif /* __SYNX_DEBUGFS_H__ */

+ 27 - 0
msm/synx/synx_err.h

@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_ERR_H__
+#define __SYNX_ERR_H__
+
+#include <linux/err.h>
+
+/**
+ * Error codes returned from framework
+ *
+ * Return codes are mapped to platform specific
+ * return values.
+ */
+#define SYNX_SUCCESS   0
+#define SYNX_NOMEM     ENOMEM
+#define SYNX_NOSUPPORT EOPNOTSUPP
+#define SYNX_NOPERM    EPERM
+#define SYNX_TIMEOUT   ETIMEDOUT
+#define SYNX_ALREADY   EALREADY
+#define SYNX_NOENT     ENOENT
+#define SYNX_INVALID   EINVAL
+#define SYNX_BUSY      EBUSY
+
+#endif /* __SYNX_ERR_H__ */
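Editor's note: the driver returns these codes negated, so they surface to callers as the corresponding negative errnos; a typical caller-side check (sketch only) looks like:

	rc = synx_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);
	if (rc == -SYNX_INVALID)	/* i.e. -EINVAL */
		dprintk(SYNX_ERR, "signal failed, rc=%d\n", rc);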

+ 819 - 0
msm/synx/synx_global.c

@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/string.h>
+
+#include "synx_debugfs.h"
+#include "synx_global.h"
+
+static struct synx_shared_mem synx_gmem;
+static struct hwspinlock *synx_hwlock;
+
+static u32 synx_gmem_lock_owner(u32 idx)
+{
+	/*
+	 * subscribers field of global table index 0 is used to
+	 * maintain synx gmem lock owner data.
+	 * core updates the field after acquiring the lock and
+	 * before releasing the lock appropriately.
+	 */
+	return synx_gmem.table[0].subscribers;
+}
+
+static void synx_gmem_lock_owner_set(u32 idx)
+{
+	synx_gmem.table[0].subscribers = SYNX_CORE_APSS;
+}
+
+static void synx_gmem_lock_owner_clear(u32 idx)
+{
+	if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS)
+		dprintk(SYNX_WARN, "reset lock owned by core %u\n",
+			synx_gmem.table[0].subscribers);
+
+	synx_gmem.table[0].subscribers = SYNX_CORE_MAX;
+}
+
+static int synx_gmem_lock(u32 idx, unsigned long *flags)
+{
+	int rc;
+
+	if (!synx_hwlock)
+		return -SYNX_INVALID;
+
+	rc = hwspin_lock_timeout_irqsave(
+		synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags);
+	if (!rc)
+		synx_gmem_lock_owner_set(idx);
+
+	return rc;
+}
+
+static void synx_gmem_unlock(u32 idx, unsigned long *flags)
+{
+	synx_gmem_lock_owner_clear(idx);
+	hwspin_unlock_irqrestore(synx_hwlock, flags);
+}
+
+static void synx_global_print_data(
+	struct synx_global_coredata *synx_g_obj,
+	const char *func)
+{
+	int i = 0;
+
+	dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u",
+		func, synx_g_obj->status,
+		synx_g_obj->handle, synx_g_obj->refcount);
+
+	dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u",
+		func, synx_g_obj->subscribers, synx_g_obj->waiters,
+		synx_g_obj->num_child);
+
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
+		if (synx_g_obj->parents[i])
+			dprintk(SYNX_VERB, "%s: parents %u:%u",
+				func, i, synx_g_obj->parents[i]);
+}
+
+int synx_global_dump_shared_memory(void)
+{
+	int rc = SYNX_SUCCESS, idx;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_INVALID;
+
+	/* Print bitmap memory */
+	for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) {
+		rc = synx_gmem_lock(idx, &flags);
+
+		if (rc)
+			return rc;
+
+		dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d",
+		__func__, idx, synx_gmem.bitmap[idx]);
+
+		synx_gmem_unlock(idx, &flags);
+	}
+
+	/* Print table memory */
+	for (idx = 0;
+		idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT;
+		idx++) {
+		rc = synx_gmem_lock(idx, &flags);
+
+		if (rc)
+			return rc;
+
+		dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx);
+
+		synx_g_obj = &synx_gmem.table[idx];
+		synx_global_print_data(synx_g_obj, __func__);
+
+		synx_gmem_unlock(idx, &flags);
+	}
+	return rc;
+}
+
+static int synx_gmem_init(void)
+{
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID);
+	if (!synx_hwlock) {
+		dprintk(SYNX_ERR, "hwspinlock request failed\n");
+		return -SYNX_NOMEM;
+	}
+
+	/* zero idx not allocated for clients */
+	ipclite_global_test_and_set_bit(0,
+		(ipclite_atomic_uint32_t *)synx_gmem.bitmap);
+	memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata));
+
+	return SYNX_SUCCESS;
+}
+
+u32 synx_global_map_core_id(enum synx_core_id id)
+{
+	u32 host_id;
+
+	switch (id) {
+	case SYNX_CORE_APSS:
+		host_id = IPCMEM_APPS; break;
+	case SYNX_CORE_NSP:
+		host_id = IPCMEM_CDSP; break;
+	case SYNX_CORE_IRIS:
+		host_id = IPCMEM_VPU; break;
+	case SYNX_CORE_EVA:
+		host_id = IPCMEM_CVP; break;
+	default:
+		host_id = IPCMEM_NUM_HOSTS;
+		dprintk(SYNX_ERR, "invalid core id\n");
+	}
+
+	return host_id;
+}
+
+int synx_global_alloc_index(u32 *idx)
+{
+	int rc = SYNX_SUCCESS;
+	u32 prev, index;
+	const u32 size = SYNX_GLOBAL_MAX_OBJS;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (IS_ERR_OR_NULL(idx))
+		return -SYNX_INVALID;
+
+	do {
+		index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size);
+		if (index >= size) {
+			rc = -SYNX_NOMEM;
+			break;
+		}
+		prev = ipclite_global_test_and_set_bit(index % 32,
+				(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index/32));
+		if ((prev & (1UL << (index % 32))) == 0) {
+			*idx = index;
+			dprintk(SYNX_MEM, "allocated global idx %u\n", *idx);
+			break;
+		}
+	} while (true);
+
+	return rc;
+}
+
+int synx_global_init_coredata(u32 h_synx)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	memset(synx_g_obj, 0, sizeof(*synx_g_obj));
+	/* set status to active */
+	synx_g_obj->status = SYNX_STATE_ACTIVE;
+	synx_g_obj->refcount = 1;
+	synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS);
+	synx_g_obj->handle = h_synx;
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+static int synx_global_get_waiting_cores_locked(
+	struct synx_global_coredata *synx_g_obj,
+	bool *cores)
+{
+	int i;
+
+	synx_global_print_data(synx_g_obj, __func__);
+	for (i = 0; i < SYNX_CORE_MAX; i++) {
+		if (synx_g_obj->waiters & (1UL << i)) {
+			cores[i] = true;
+			dprintk(SYNX_VERB,
+				"waiting for handle %u/n",
+				synx_g_obj->handle);
+		}
+	}
+
+	/* clear waiter list so signals are not repeated */
+	synx_g_obj->waiters = 0;
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_get_waiting_cores(u32 idx, bool *cores)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_get_waiting_cores_locked(synx_g_obj, cores);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_set_waiting_core(u32 idx, enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->waiters |= (1UL << id);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_get_subscribed_cores(u32 idx, bool *cores)
+{
+	int i;
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	for (i = 0; i < SYNX_CORE_MAX; i++)
+		if (synx_g_obj->subscribers & (1UL << i))
+			cores[i] = true;
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->subscribers |= (1UL << id);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
+u32 synx_global_get_parents_num(u32 idx)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	u32 i, count = 0;
+
+	if (!synx_gmem.table)
+		return 0;
+
+	if (!synx_is_valid_idx(idx))
+		return 0;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+		if (synx_g_obj->parents[i] != 0)
+			count++;
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	return count;
+}
+
+static int synx_global_get_parents_locked(
+	struct synx_global_coredata *synx_g_obj, u32 *parents)
+{
+	u32 i;
+
+	if (!synx_g_obj || !parents)
+		return -SYNX_NOMEM;
+
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
+		parents[i] = synx_g_obj->parents[i];
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_get_parents(u32 idx, u32 *parents)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table || !parents)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	rc = synx_global_get_parents_locked(synx_g_obj, parents);
+	synx_gmem_unlock(idx, &flags);
+
+	return rc;
+}
+
+u32 synx_global_get_status(u32 idx)
+{
+	int rc;
+	unsigned long flags;
+	u32 status;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return 0;
+
+	if (!synx_is_valid_idx(idx))
+		return 0;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	status = synx_g_obj->status;
+	synx_gmem_unlock(idx, &flags);
+
+	return status;
+}
+
+u32 synx_global_test_status_set_wait(u32 idx,
+	enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	u32 status;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return 0;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return 0;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return 0;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_print_data(synx_g_obj, __func__);
+	status = synx_g_obj->status;
+	/* if handle is still ACTIVE */
+	if (status == SYNX_STATE_ACTIVE)
+		synx_g_obj->waiters |= (1UL << id);
+	else
+		dprintk(SYNX_DBG, "handle %u already signaled %u",
+			synx_g_obj->handle, synx_g_obj->status);
+	synx_gmem_unlock(idx, &flags);
+
+	return status;
+}
+
+static int synx_global_update_status_core(u32 idx,
+	u32 status)
+{
+	u32 i, p_idx;
+	int rc;
+	bool clear = false;
+	unsigned long flags;
+	uint64_t data;
+	struct synx_global_coredata *synx_g_obj;
+	u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0};
+	bool wait_cores[SYNX_CORE_MAX] = {false};
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_print_data(synx_g_obj, __func__);
+	/* prepare for cross core signaling */
+	data = synx_g_obj->handle;
+	data <<= 32;
+	if (synx_g_obj->num_child != 0) {
+		/* composite handle */
+		synx_g_obj->num_child--;
+		if (synx_g_obj->num_child == 0) {
+			if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
+				synx_g_obj->status =
+					(status == SYNX_STATE_SIGNALED_SUCCESS) ?
+					SYNX_STATE_SIGNALED_SUCCESS : SYNX_STATE_SIGNALED_ERROR;
+				data |= synx_g_obj->status;
+				synx_global_get_waiting_cores_locked(synx_g_obj,
+					wait_cores);
+				synx_global_get_parents_locked(synx_g_obj, h_parents);
+			} else {
+				data = 0;
+				dprintk(SYNX_WARN,
+					"merged handle %u already in state %u\n",
+					synx_g_obj->handle, synx_g_obj->status);
+			}
+			/* release ref held by constituting handles */
+			synx_g_obj->refcount--;
+			if (synx_g_obj->refcount == 0) {
+				memset(synx_g_obj, 0,
+					sizeof(*synx_g_obj));
+				clear = true;
+			}
+		} else if (status != SYNX_STATE_SIGNALED_SUCCESS) {
+			synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
+			data |= synx_g_obj->status;
+			synx_global_get_waiting_cores_locked(synx_g_obj,
+				wait_cores);
+			synx_global_get_parents_locked(synx_g_obj, h_parents);
+			dprintk(SYNX_WARN,
+				"merged handle %u signaled with error state\n",
+				synx_g_obj->handle);
+		} else {
+			/* pending notification from remaining child handles */
+			data = 0;
+			dprintk(SYNX_DBG,
+				"Child notified parent handle %u, pending %u\n",
+				synx_g_obj->handle, synx_g_obj->num_child);
+		}
+	} else {
+		synx_g_obj->status = status;
+		data |= synx_g_obj->status;
+		synx_global_get_waiting_cores_locked(synx_g_obj,
+			wait_cores);
+		synx_global_get_parents_locked(synx_g_obj, h_parents);
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	if (clear) {
+		ipclite_global_test_and_clear_bit(idx%32,
+			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
+		dprintk(SYNX_MEM,
+			"cleared global idx %u\n", idx);
+	}
+
+	/* notify waiting clients on signal */
+	if (data) {
+		/* notify wait client */
+		for (i = 1; i < SYNX_CORE_MAX; i++) {
+			if (!wait_cores[i])
+				continue;
+			dprintk(SYNX_DBG,
+				"invoking ipc signal handle %u, status %u\n",
+				synx_g_obj->handle, synx_g_obj->status);
+			if (ipclite_msg_send(
+				synx_global_map_core_id(i),
+				data))
+				dprintk(SYNX_ERR,
+					"ipc signaling %llu to core %u failed\n",
+					data, i);
+		}
+	}
+
+	/* handle parent notifications */
+	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+		p_idx = h_parents[i];
+		if (p_idx == 0)
+			continue;
+		synx_global_update_status_core(p_idx, status);
+	}
+
+	return SYNX_SUCCESS;
+}
+
+int synx_global_update_status(u32 idx, u32 status)
+{
+	int rc = -SYNX_INVALID;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE)
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	if (synx_g_obj->num_child != 0) {
+		/* composite handle cannot be signaled */
+		goto fail;
+	} else if (synx_g_obj->status != SYNX_STATE_ACTIVE) {
+		rc = -SYNX_ALREADY;
+		goto fail;
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	return synx_global_update_status_core(idx, status);
+
+fail:
+	synx_gmem_unlock(idx, &flags);
+	return rc;
+}
+
+int synx_global_get_ref(u32 idx)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_global_print_data(synx_g_obj, __func__);
+	if (synx_g_obj->handle && synx_g_obj->refcount)
+		synx_g_obj->refcount++;
+	else
+		rc = -SYNX_NOENT;
+	synx_gmem_unlock(idx, &flags);
+
+	return rc;
+}
+
+void synx_global_put_ref(u32 idx)
+{
+	int rc;
+	bool clear = false;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return;
+
+	if (!synx_is_valid_idx(idx))
+		return;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->refcount--;
+	if (synx_g_obj->refcount == 0) {
+		memset(synx_g_obj, 0, sizeof(*synx_g_obj));
+		clear = true;
+	}
+	synx_gmem_unlock(idx, &flags);
+
+	if (clear) {
+		ipclite_global_test_and_clear_bit(idx%32,
+			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
+		dprintk(SYNX_MEM, "cleared global idx %u\n", idx);
+	}
+}
+
+int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
+{
+	int rc = -SYNX_INVALID;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	u32 i, j = 0;
+	u32 idx;
+	bool sig_error = false;
+	u32 num_child = 0;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (!synx_is_valid_idx(p_idx))
+		return -SYNX_INVALID;
+
+	while (j < num_list) {
+		idx = idx_list[j];
+
+		if (!synx_is_valid_idx(idx))
+			goto fail;
+
+		rc = synx_gmem_lock(idx, &flags);
+		if (rc)
+			goto fail;
+
+		synx_g_obj = &synx_gmem.table[idx];
+		if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
+			for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+				if (synx_g_obj->parents[i] == 0) {
+					synx_g_obj->parents[i] = p_idx;
+					break;
+				}
+			}
+			num_child++;
+		} else if (synx_g_obj->status >
+			SYNX_STATE_SIGNALED_SUCCESS) {
+			sig_error = true;
+		}
+		synx_gmem_unlock(idx, &flags);
+
+		if (i >= SYNX_GLOBAL_MAX_PARENTS) {
+			rc = -SYNX_NOMEM;
+			goto fail;
+		}
+
+		j++;
+	}
+
+	rc = synx_gmem_lock(p_idx, &flags);
+	if (rc)
+		goto fail;
+	synx_g_obj = &synx_gmem.table[p_idx];
+	synx_g_obj->num_child += num_child;
+	if (sig_error)
+		synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
+	else if (synx_g_obj->num_child != 0)
+		synx_g_obj->refcount++;
+	else if (synx_g_obj->num_child == 0 &&
+		synx_g_obj->status == SYNX_STATE_ACTIVE)
+		synx_g_obj->status = SYNX_STATE_SIGNALED_SUCCESS;
+	synx_global_print_data(synx_g_obj, __func__);
+	synx_gmem_unlock(p_idx, &flags);
+
+	return SYNX_SUCCESS;
+
+fail:
+	while (num_child--) {
+		idx = idx_list[num_child];
+
+		if (synx_gmem_lock(idx, &flags))
+			continue;
+		synx_g_obj = &synx_gmem.table[idx];
+		for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+			if (synx_g_obj->parents[i] == p_idx) {
+				synx_g_obj->parents[i] = 0;
+				break;
+			}
+		}
+		synx_gmem_unlock(idx, &flags);
+	}
+
+	return rc;
+}
+
+int synx_global_recover(enum synx_core_id core_id)
+{
+	int rc = SYNX_SUCCESS;
+	u32 idx = 0;
+	const u32 size = SYNX_GLOBAL_MAX_OBJS;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+	bool update;
+	int *clear_idx = NULL;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	clear_idx = kzalloc(sizeof(int)*SYNX_GLOBAL_MAX_OBJS, GFP_KERNEL);
+
+	if (!clear_idx)
+		return -SYNX_NOMEM;
+
+	ipclite_hwlock_reset(synx_global_map_core_id(core_id));
+
+	/* recover synx gmem lock if it was owned by core in ssr */
+	if (synx_gmem_lock_owner(0) == core_id) {
+		synx_gmem_lock_owner_clear(0);
+		hwspin_unlock_raw(synx_hwlock);
+	}
+
+	idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
+			size, idx + 1);
+	while (idx < size) {
+		update = false;
+		rc = synx_gmem_lock(idx, &flags);
+		if (rc)
+			goto free;
+		synx_g_obj = &synx_gmem.table[idx];
+		if (synx_g_obj->refcount &&
+			 synx_g_obj->subscribers & (1UL << core_id)) {
+			synx_g_obj->subscribers &= ~(1UL << core_id);
+			synx_g_obj->refcount--;
+			if (synx_g_obj->refcount == 0) {
+				memset(synx_g_obj, 0, sizeof(*synx_g_obj));
+				clear_idx[idx] = 1;
+			} else if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
+				update = true;
+			}
+		}
+		synx_gmem_unlock(idx, &flags);
+		if (update)
+			synx_global_update_status(idx,
+				SYNX_STATE_SIGNALED_SSR);
+		idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
+				size, idx + 1);
+	}
+
+	for (idx = 1; idx < size; idx++) {
+		if (clear_idx[idx]) {
+			ipclite_global_test_and_clear_bit(idx % 32,
+				(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
+			dprintk(SYNX_MEM, "released global idx %u\n", idx);
+		}
+	}
+free:
+	kfree(clear_idx);
+
+	return rc;
+}
+
+int synx_global_mem_init(void)
+{
+	int rc;
+	int bitmap_size = SYNX_GLOBAL_MAX_OBJS/32;
+	struct global_region_info mem_info;
+
+	rc = get_global_partition_info(&mem_info);
+	if (rc) {
+		dprintk(SYNX_ERR, "error setting up global shared memory\n");
+		return rc;
+	}
+
+	memset(mem_info.virt_base, 0, mem_info.size);
+	dprintk(SYNX_DBG, "global shared memory %pK size %u\n",
+		mem_info.virt_base, mem_info.size);
+
+	synx_gmem.bitmap = (u32 *)mem_info.virt_base;
+	synx_gmem.locks = synx_gmem.bitmap + bitmap_size;
+	synx_gmem.table =
+		(struct synx_global_coredata *)(synx_gmem.locks + 2);
+	dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n",
+		synx_gmem.bitmap, synx_gmem.table);
+
+	return synx_gmem_init();
+}

+ 284 - 0
msm/synx/synx_global.h

@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_SHARED_MEM_H__
+#define __SYNX_SHARED_MEM_H__
+
+#include "synx_err.h"
+#include "ipclite_client.h"
+
+#include <synx_header.h>
+
+/**
+ * enum synx_core_id - Synx core IDs
+ *
+ * SYNX_CORE_APSS : APSS core
+ * SYNX_CORE_NSP  : NSP core
+ * SYNX_CORE_EVA  : EVA core
+ * SYNX_CORE_IRIS : IRIS core
+ */
+enum synx_core_id {
+	SYNX_CORE_APSS = 0,
+	SYNX_CORE_NSP,
+	SYNX_CORE_EVA,
+	SYNX_CORE_IRIS,
+	SYNX_CORE_MAX,
+};
+
+/* synx handle encoding */
+#define SYNX_HANDLE_INDEX_BITS         16
+#define SYNX_HANDLE_CORE_BITS          4
+#define SYNX_HANDLE_GLOBAL_FLAG_BIT    1
+
+#define SYNX_GLOBAL_SHARED_LOCKS       1
+#define SYNX_GLOBAL_MAX_OBJS           4096
+#define SYNX_GLOBAL_MAX_PARENTS        4
+
+#define SYNX_HANDLE_INDEX_MASK         ((1UL<<SYNX_HANDLE_INDEX_BITS)-1)
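Editor's note: the global table index is carried in the low 16 bits of a handle, which is how the .c file derives it (see synx_global_init_coredata); a minimal sketch of the lookup step inside such a helper:

	u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;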
+
+#define SHRD_MEM_DUMP_NUM_BMAP_WORDS   10
+#define NUM_CHAR_BIT                   8
+
+/* spin lock timeout (ms) */
+#define SYNX_HWSPIN_TIMEOUT            500
+#define SYNX_HWSPIN_ID                 10
+
+/* internal signal states */
+#define SYNX_STATE_INVALID             0
+#define SYNX_STATE_ACTIVE              1
+#define SYNX_STATE_SIGNALED_ERROR      3
+#define SYNX_STATE_SIGNALED_EXTERNAL   5
+#define SYNX_STATE_SIGNALED_SSR        6
+
+/**
+ * struct synx_global_coredata - Synx global object, used for book keeping
+ * of all metadata associated with each individual global entry
+ *
+ * @status      : Synx signaling status
+ * @handle      : Handle of global entry
+ * @refcount    : References owned by each core
+ * @num_child   : Count of children pending signal (for composite handle)
+ * @subscribers : Cores owning reference on this object
+ * @waiters     : Cores waiting for notification
+ * @parents     : Composite global coredata index of parent entities
+ *                Can be part of SYNX_GLOBAL_MAX_PARENTS composite entries.
+ */
+struct synx_global_coredata {
+	u32 status;
+	u32 handle;
+	u16 refcount;
+	u16 num_child;
+	u16 subscribers;
+	u16 waiters;
+	u16 parents[SYNX_GLOBAL_MAX_PARENTS];
+};
+
+/**
+ * struct synx_shared_mem - Synx global shared memory descriptor
+ *
+ * @bitmap : Bitmap for allocating entries from the table
+ * @locks  : Array of locks for exclusive access to table entries
+ * @table  : Array of Synx global entries
+ */
+struct synx_shared_mem {
+	u32 *bitmap;
+	u32 *locks;
+	struct synx_global_coredata *table;
+};
+
+static inline bool synx_is_valid_idx(u32 idx)
+{
+	if (idx < SYNX_GLOBAL_MAX_OBJS)
+		return true;
+	return false;
+}
+
+/**
+ * synx_global_mem_init - Initialize global shared memory
+ *
+ * @return Zero on success, negative error on failure.
+ */
+int synx_global_mem_init(void);
+
+/**
+ * synx_global_map_core_id - Map Synx core ID to IPC Lite host
+ *
+ * @param id : Core Id to map
+ *
+ * @return IPC host ID.
+ */
+u32 synx_global_map_core_id(enum synx_core_id id);
+
+/**
+ * synx_global_alloc_index - Allocate new global entry
+ *
+ * @param idx : Pointer to global table index (filled by function)
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_alloc_index(u32 *idx);
+
+/**
+ * synx_global_init_coredata - Initialize the global entry backing the handle
+ *
+ * @param h_synx : Synx global handle
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_init_coredata(u32 h_synx);
+
+/**
+ * synx_global_get_waiting_cores - Get the list of all cores waiting on the global entry
+ *
+ * Fills the cores array with true if the core is waiting and
+ * false if not. Indexed through enum synx_core_id.
+ *
+ * @param idx   : Global entry index
+ * @param cores : Array of boolean variables, one for each supported core.
+ *                Array should contain SYNX_CORE_MAX entries.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_waiting_cores(u32 idx, bool *cores);
+
+/**
+ * synx_global_set_waiting_core - Set core as a waiting core on global entry
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be set as waiter
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_set_waiting_core(u32 idx, enum synx_core_id id);
+
+/**
+ * synx_global_get_subscribed_cores - Get the list of all cores subscribed to the global entry
+ *
+ * Fills the cores array with true if the core is subscribed and
+ * false if not. Indexed through enum synx_core_id.
+ *
+ * @param idx   : Global entry index
+ * @param cores : Array of boolean variables, one for each supported core.
+ *                Array should contain SYNX_CORE_MAX entries.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_subscribed_cores(u32 idx, bool *cores);
+
+/**
+ * synx_global_set_subscribed_core - Set core as a subscriber core on global entry
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be added as subscriber
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id);
+
+/**
+ * synx_global_get_status - Get status of the global entry
+ *
+ * @param idx : Global entry index
+ *
+ * @return Global entry status
+ */
+u32 synx_global_get_status(u32 idx);
+
+/**
+ * synx_global_test_status_set_wait - Check status and add core as waiter if not signaled
+ *
+ * Tests the status and registers the waiter in one atomic operation,
+ * to avoid a race with signal: if the status check and the waiter
+ * registration were two separate operations, a signal arriving in
+ * between could miss sending the IPC notification to the waiter.
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be set as waiter (if unsignaled)
+ *
+ * @return Status of global entry idx.
+ */
+u32 synx_global_test_status_set_wait(u32 idx,
+	enum synx_core_id id);
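
/*
 * Illustrative caller pattern (not driver code) for the declaration above:
 * register as a waiter and read the status in the same atomic call, and
 * only block for the IPC notification when the entry is still active.
 */
static u32 sketch_wait_on_global(u32 idx)
{
	u32 status = synx_global_test_status_set_wait(idx, SYNX_CORE_APSS);

	if (status == SYNX_STATE_ACTIVE) {
		/*
		 * still active: the caller would now sleep until the
		 * producer core signals and IPC wakes this core up.
		 */
	}

	return status;
}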
+
+/**
+ * synx_global_update_status - Update status of the global entry
+ *
+ * Function also updates the parent composite handles
+ * about the signaling.
+ *
+ * @param idx    : Global entry index
+ * @param status : Signaling status
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_update_status(u32 idx, u32 status);
+
+/**
+ * synx_global_get_ref - Get additional reference on global entry
+ *
+ * @param idx : Global entry index
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_ref(u32 idx);
+
+/**
+ * synx_global_put_ref - Release reference on global entry
+ *
+ * @param idx : Global entry index
+ */
+void synx_global_put_ref(u32 idx);
+
+/**
+ * synx_global_get_parents - Get the global entry index of all composite parents
+ *
+ * @param idx     : Global entry index whose parents are requested
+ * @param parents : Array of global entry index of composite handles
+ *                  Filled by the function. Array should contain at least
+ *                  SYNX_GLOBAL_MAX_PARENTS entries.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_get_parents(u32 idx, u32 *parents);
+
+/**
+ * synx_global_merge - Merge handles to form global handle
+ *
+ * Used to implement the merge (composite handle) functionality.
+ *
+ * @param idx_list : List of global indexes to merge
+ * @param num_list : Number of handles in the list to merge
+ * @param p_idx   : Global entry index allocated for the composite handle
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx);
+
+/**
+ * synx_global_recover - Recover handles subscribed by specific core
+ *
+ * @param id : Core ID to clean up
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_recover(enum synx_core_id id);
+
+/**
+ * synx_global_clean_cdsp_mem - Release handles created/used by CDSP
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_clean_cdsp_mem(void);
+
+/**
+ * synx_global_dump_shared_memory - Prints the top entries of
+ * bitmap and table in global shared memory.
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_dump_shared_memory(void);
+
+#endif /* __SYNX_SHARED_MEM_H__ */
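
As a usage illustration of the waiter query declared above, a caller would pass a bool array sized SYNX_CORE_MAX and walk it by core id. A minimal sketch, assuming only the declarations in this header and SYNX_SUCCESS from synx_err.h (not driver code):

/* Collect the cores currently waiting on a global entry and count them. */
static int sketch_count_waiters(u32 idx)
{
	bool cores[SYNX_CORE_MAX];
	int id, count = 0;

	if (synx_global_get_waiting_cores(idx, cores) != SYNX_SUCCESS)
		return -1;

	for (id = 0; id < SYNX_CORE_MAX; id++)
		if (cores[id])
			count++;

	return count;
}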

+ 245 - 0
msm/synx/synx_private.h

@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_PRIVATE_H__
+#define __SYNX_PRIVATE_H__
+
+#include <linux/bitmap.h>
+#include <linux/cdev.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/hashtable.h>
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+#include "synx_api.h"
+#include "synx_global.h"
+
+#define SYNX_MAX_OBJS               SYNX_GLOBAL_MAX_OBJS
+
+#define SYNX_NAME                   "synx"
+#define SYNX_DEVICE_NAME            "synx_device"
+#define SYNX_WQ_CB_NAME             "hiprio_synx_cb_queue"
+#define SYNX_WQ_CB_THREADS          4
+#define SYNX_WQ_CLEANUP_NAME        "hiprio_synx_cleanup_queue"
+#define SYNX_WQ_CLEANUP_THREADS     2
+#define SYNX_MAX_NUM_BINDINGS       8
+
+#define SYNX_OBJ_HANDLE_SHIFT       SYNX_HANDLE_INDEX_BITS
+#define SYNX_OBJ_CORE_ID_SHIFT      (SYNX_OBJ_HANDLE_SHIFT+SYNX_HANDLE_CORE_BITS)
+#define SYNX_OBJ_GLOBAL_FLAG_SHIFT  (SYNX_OBJ_CORE_ID_SHIFT+SYNX_HANDLE_GLOBAL_FLAG_BIT)
+
+#define SYNX_OBJ_HANDLE_MASK        GENMASK_ULL(SYNX_OBJ_HANDLE_SHIFT-1, 0)
+#define SYNX_OBJ_CORE_ID_MASK       GENMASK_ULL(SYNX_OBJ_CORE_ID_SHIFT-1, SYNX_OBJ_HANDLE_SHIFT)
+#define SYNX_OBJ_GLOBAL_FLAG_MASK   \
+	GENMASK_ULL(SYNX_OBJ_GLOBAL_FLAG_SHIFT-1, SYNX_OBJ_CORE_ID_SHIFT)
+
+#define MAX_TIMESTAMP_SIZE          32
+#define SYNX_OBJ_NAME_LEN           64
+
+#define SYNX_PAYLOAD_WORDS          4
+
+#define SYNX_CREATE_IM_EX_RELEASE   SYNX_CREATE_MAX_FLAGS
+#define SYNX_CREATE_MERGED_FENCE    (SYNX_CREATE_MAX_FLAGS << 1)
+
+#define SYNX_MAX_REF_COUNTS         100
+
+struct synx_bind_desc {
+	struct synx_external_desc_v2 external_desc;
+	void *external_data;
+};
+
+struct error_node {
+	char timestamp[MAX_TIMESTAMP_SIZE];
+	u64 session;
+	u32 client_id;
+	u32 h_synx;
+	s32 error_code;
+	struct list_head node;
+};
+
+struct synx_entry_32 {
+	u32 key;
+	void *data;
+	struct hlist_node node;
+};
+
+struct synx_entry_64 {
+	u64 key;
+	u32 data[2];
+	struct kref refcount;
+	struct hlist_node node;
+};
+
+struct synx_map_entry {
+	struct synx_coredata *synx_obj;
+	struct kref refcount;
+	u32 flags;
+	u32 key;
+	struct work_struct dispatch;
+	struct hlist_node node;
+};
+
+struct synx_fence_entry {
+	u32 g_handle;
+	u32 l_handle;
+	u64 key;
+	struct hlist_node node;
+};
+
+struct synx_kernel_payload {
+	u32 h_synx;
+	u32 status;
+	void *data;
+	synx_user_callback_t cb_func;
+	synx_user_callback_t cancel_cb_func;
+};
+
+struct synx_cb_data {
+	struct synx_session *session;
+	u32 idx;
+	u32 status;
+	struct work_struct cb_dispatch;
+	struct list_head node;
+};
+
+struct synx_client_cb {
+	bool is_valid;
+	u32 idx;
+	struct synx_client *client;
+	struct synx_kernel_payload kernel_cb;
+	struct list_head node;
+};
+
+struct synx_registered_ops {
+	char name[SYNX_OBJ_NAME_LEN];
+	struct bind_operations ops;
+	enum synx_bind_client_type type;
+	bool valid;
+};
+
+struct synx_cleanup_cb {
+	void *data;
+	struct work_struct cb_dispatch;
+};
+
+enum synx_signal_handler {
+	SYNX_SIGNAL_FROM_CLIENT   = 0x1,
+	SYNX_SIGNAL_FROM_FENCE    = 0x2,
+	SYNX_SIGNAL_FROM_IPC      = 0x4,
+	SYNX_SIGNAL_FROM_CALLBACK = 0x8,
+};
+
+struct synx_signal_cb {
+	u32 handle;
+	u32 status;
+	u64 ext_sync_id;
+	struct synx_coredata *synx_obj;
+	enum synx_signal_handler flag;
+	struct dma_fence_cb fence_cb;
+	struct work_struct cb_dispatch;
+};
+
+struct synx_coredata {
+	char name[SYNX_OBJ_NAME_LEN];
+	struct dma_fence *fence;
+	struct mutex obj_lock;
+	struct kref refcount;
+	u32 type;
+	u32 num_bound_synxs;
+	struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS];
+	struct list_head reg_cbs_list;
+	u32 global_idx;
+	u32 map_count;
+	struct synx_signal_cb *signal_cb;
+};
+
+struct synx_client;
+struct synx_device;
+
+struct synx_handle_coredata {
+	struct synx_client *client;
+	struct synx_coredata *synx_obj;
+	void *map_entry;
+	struct kref refcount;
+	u32 key;
+	u32 rel_count;
+	struct work_struct dispatch;
+	struct hlist_node node;
+};
+
+struct synx_client {
+	u32 type;
+	bool active;
+	struct synx_device *device;
+	char name[SYNX_OBJ_NAME_LEN];
+	u64 id;
+	u64 dma_context;
+	struct kref refcount;
+	struct mutex event_q_lock;
+	struct list_head event_q;
+	wait_queue_head_t event_wq;
+	DECLARE_BITMAP(cb_bitmap, SYNX_MAX_OBJS);
+	struct synx_client_cb cb_table[SYNX_MAX_OBJS];
+	DECLARE_HASHTABLE(handle_map, 8);
+	spinlock_t handle_map_lock;
+	struct work_struct dispatch;
+	struct hlist_node node;
+};
+
+struct synx_native {
+	spinlock_t metadata_map_lock;
+	DECLARE_HASHTABLE(client_metadata_map, 8);
+	spinlock_t fence_map_lock;
+	DECLARE_HASHTABLE(fence_map, 10);
+	spinlock_t global_map_lock;
+	DECLARE_HASHTABLE(global_map, 10);
+	spinlock_t local_map_lock;
+	DECLARE_HASHTABLE(local_map, 8);
+	spinlock_t csl_map_lock;
+	DECLARE_HASHTABLE(csl_fence_map, 8);
+	DECLARE_BITMAP(bitmap, SYNX_MAX_OBJS);
+};
+
+struct synx_cdsp_ssr {
+	u64 ssrcnt;
+	void *handle;
+	struct notifier_block nb;
+};
+
+struct synx_device {
+	struct cdev cdev;
+	dev_t dev;
+	struct class *class;
+	struct synx_native *native;
+	struct workqueue_struct *wq_cb;
+	struct workqueue_struct *wq_cleanup;
+	struct mutex vtbl_lock;
+	struct synx_registered_ops bind_vtbl[SYNX_MAX_BIND_TYPES];
+	struct dentry *debugfs_root;
+	struct list_head error_list;
+	struct mutex error_lock;
+	struct synx_cdsp_ssr cdsp_ssr;
+};
+
+int synx_signal_core(struct synx_coredata *synx_obj,
+	u32 status,
+	bool cb_signal,
+	s32 ext_sync_id);
+
+int synx_ipc_callback(uint32_t client_id,
+	int64_t data, void *priv);
+
+void synx_signal_handler(struct work_struct *cb_dispatch);
+
+int synx_native_release_core(struct synx_client *session,
+	u32 h_synx);
+
+int synx_bind(struct synx_session *session,
+	u32 h_synx,
+	struct synx_external_desc_v2 external_sync);
+
+#endif /* __SYNX_PRIVATE_H__ */
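
To make the handle layout above concrete: with SYNX_HANDLE_INDEX_BITS = 16, SYNX_HANDLE_CORE_BITS = 4 and one global flag bit, a handle packs the table index into bits 15..0, the core id into bits 19..16 and the global flag into bit 20. A minimal decode sketch using only the shift/mask macros defined above (the sketch_* helpers are illustrative, not part of the driver):

static inline u32 sketch_handle_index(u32 h_synx)
{
	return h_synx & SYNX_OBJ_HANDLE_MASK;                              /* bits 15..0  */
}

static inline u32 sketch_handle_core_id(u32 h_synx)
{
	return (h_synx & SYNX_OBJ_CORE_ID_MASK) >> SYNX_OBJ_HANDLE_SHIFT;  /* bits 19..16 */
}

static inline bool sketch_handle_is_global(u32 h_synx)
{
	return (h_synx & SYNX_OBJ_GLOBAL_FLAG_MASK) != 0;                  /* bit 20 */
}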

+ 1525 - 0
msm/synx/synx_util.c

@@ -0,0 +1,1525 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#include "synx_debugfs.h"
+#include "synx_util.h"
+
+extern void synx_external_callback(s32 sync_obj, int status, void *data);
+
+int synx_util_init_coredata(struct synx_coredata *synx_obj,
+	struct synx_create_params *params,
+	struct dma_fence_ops *ops,
+	u64 dma_context)
+{
+	int rc = -SYNX_INVALID;
+	spinlock_t *fence_lock;
+	struct dma_fence *fence;
+	struct synx_fence_entry *entry;
+
+	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) ||
+		 IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx))
+		return -SYNX_INVALID;
+
+	if (params->flags & SYNX_CREATE_GLOBAL_FENCE &&
+		*params->h_synx != 0) {
+		rc = synx_global_get_ref(
+			synx_util_global_idx(*params->h_synx));
+		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
+	} else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) {
+		rc = synx_alloc_global_handle(params->h_synx);
+		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
+	} else {
+		rc = synx_alloc_local_handle(params->h_synx);
+	}
+
+	if (rc != SYNX_SUCCESS)
+		return rc;
+
+	synx_obj->map_count = 1;
+	synx_obj->num_bound_synxs = 0;
+	synx_obj->type |= params->flags;
+	kref_init(&synx_obj->refcount);
+	mutex_init(&synx_obj->obj_lock);
+	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
+	if (params->name)
+		strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name));
+
+	if (params->flags & SYNX_CREATE_DMA_FENCE) {
+		fence = params->fence;
+		if (IS_ERR_OR_NULL(fence)) {
+			dprintk(SYNX_ERR, "invalid external fence\n");
+			goto free;
+		}
+
+		dma_fence_get(fence);
+		synx_obj->fence = fence;
+	} else {
+		/*
+		 * lock and fence memory will be released in fence
+		 * release function
+		 */
+		fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(fence_lock)) {
+			rc = -SYNX_NOMEM;
+			goto free;
+		}
+
+		fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(fence)) {
+			kfree(fence_lock);
+			rc = -SYNX_NOMEM;
+			goto free;
+		}
+
+		spin_lock_init(fence_lock);
+		dma_fence_init(fence, ops, fence_lock, dma_context, 1);
+
+		synx_obj->fence = fence;
+		synx_util_activate(synx_obj);
+		dprintk(SYNX_MEM,
+			"allocated backing fence %pK\n", fence);
+
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(entry)) {
+			rc = -SYNX_NOMEM;
+			goto clean;
+		}
+
+		entry->key = (u64)fence;
+		if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
+			entry->g_handle = *params->h_synx;
+		else
+			entry->l_handle = *params->h_synx;
+
+		rc = synx_util_insert_fence_entry(entry,
+				params->h_synx,
+				params->flags & SYNX_CREATE_GLOBAL_FENCE);
+		BUG_ON(rc != SYNX_SUCCESS);
+	}
+
+	if (rc != SYNX_SUCCESS)
+		goto clean;
+
+	return SYNX_SUCCESS;
+
+clean:
+	dma_fence_put(fence);
+free:
+	if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
+		synx_global_put_ref(
+			synx_util_global_idx(*params->h_synx));
+	else
+		clear_bit(synx_util_global_idx(*params->h_synx),
+			synx_dev->native->bitmap);
+
+	return rc;
+}
+
+int synx_util_add_callback(struct synx_coredata *synx_obj,
+	u32 h_synx)
+{
+	int rc;
+	struct synx_signal_cb *signal_cb;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(signal_cb))
+		return -SYNX_NOMEM;
+
+	signal_cb->handle = h_synx;
+	signal_cb->flag = SYNX_SIGNAL_FROM_FENCE;
+	signal_cb->synx_obj = synx_obj;
+
+	/* get reference on synx coredata for signal cb */
+	synx_util_get_object(synx_obj);
+
+	/*
+	 * adding the callback enables the synx framework to
+	 * get notified of signals from clients that use
+	 * native dma_fence operations.
+	 */
+	rc = dma_fence_add_callback(synx_obj->fence,
+			&signal_cb->fence_cb, synx_fence_callback);
+	if (rc != 0) {
+		if (rc == -ENOENT) {
+			if (synx_util_is_global_object(synx_obj)) {
+				/* signal (if) global handle */
+				rc = synx_global_update_status(
+					synx_obj->global_idx,
+					synx_util_get_object_status(synx_obj));
+				if (rc != SYNX_SUCCESS)
+					dprintk(SYNX_ERR,
+						"status update of %u with fence %pK\n",
+						synx_obj->global_idx, synx_obj->fence);
+			} else {
+				rc = SYNX_SUCCESS;
+			}
+		} else {
+			dprintk(SYNX_ERR,
+				"error adding callback for %pK err %d\n",
+				synx_obj->fence, rc);
+		}
+		synx_util_put_object(synx_obj);
+		kfree(signal_cb);
+		return rc;
+	}
+
+	synx_obj->signal_cb = signal_cb;
+	dprintk(SYNX_VERB, "added callback %pK to fence %pK\n",
+		signal_cb, synx_obj->fence);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
+	struct dma_fence **fences,
+	struct synx_merge_params *params,
+	u32 num_objs,
+	u64 dma_context)
+{
+	int rc;
+	struct dma_fence_array *array;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
+		rc = synx_alloc_global_handle(params->h_merged_obj);
+		synx_obj->global_idx =
+			synx_util_global_idx(*params->h_merged_obj);
+	} else {
+		rc = synx_alloc_local_handle(params->h_merged_obj);
+	}
+
+	if (rc != SYNX_SUCCESS)
+		return rc;
+
+	array = dma_fence_array_create(num_objs, fences,
+				dma_context, 1, false);
+	if (IS_ERR_OR_NULL(array))
+		return -SYNX_INVALID;
+
+	synx_obj->fence = &array->base;
+	synx_obj->map_count = 1;
+	synx_obj->type = params->flags;
+	synx_obj->type |= SYNX_CREATE_MERGED_FENCE;
+	synx_obj->num_bound_synxs = 0;
+	kref_init(&synx_obj->refcount);
+	mutex_init(&synx_obj->obj_lock);
+	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
+
+	synx_util_activate(synx_obj);
+	return rc;
+}
+
+static void synx_util_destroy_coredata(struct kref *kref)
+{
+	struct synx_coredata *synx_obj =
+		container_of(kref, struct synx_coredata, refcount);
+
+	if (synx_util_is_global_object(synx_obj))
+		synx_global_put_ref(synx_obj->global_idx);
+	synx_util_object_destroy(synx_obj);
+}
+
+void synx_util_get_object(struct synx_coredata *synx_obj)
+{
+	kref_get(&synx_obj->refcount);
+}
+
+void synx_util_put_object(struct synx_coredata *synx_obj)
+{
+	kref_put(&synx_obj->refcount, synx_util_destroy_coredata);
+}
+
+void synx_util_object_destroy(struct synx_coredata *synx_obj)
+{
+	int rc;
+	u32 i;
+	s32 sync_id;
+	u32 type;
+	struct synx_cb_data *synx_cb, *synx_cb_temp;
+	struct synx_bind_desc *bind_desc;
+	struct bind_operations *bind_ops;
+	struct synx_external_data *data;
+
+	/* clear all the undispatched callbacks */
+	list_for_each_entry_safe(synx_cb,
+		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
+		dprintk(SYNX_ERR,
+			"cleaning up callback of session %pK\n",
+			synx_cb->session);
+		list_del_init(&synx_cb->node);
+		kfree(synx_cb);
+	}
+
+	for (i = 0; i < synx_obj->num_bound_synxs; i++) {
+		bind_desc = &synx_obj->bound_synxs[i];
+		sync_id = bind_desc->external_desc.id;
+		type = bind_desc->external_desc.type;
+		data = bind_desc->external_data;
+		bind_ops = synx_util_get_bind_ops(type);
+		if (IS_ERR_OR_NULL(bind_ops)) {
+			dprintk(SYNX_ERR,
+				"bind ops fail id: %d, type: %u\n",
+				sync_id, type);
+			continue;
+		}
+
+		/* clear the hash table entry */
+		synx_util_remove_data(&sync_id, type);
+
+		rc = bind_ops->deregister_callback(
+				synx_external_callback, data, sync_id);
+		if (rc < 0) {
+			dprintk(SYNX_ERR,
+				"de-registration fail id: %d, type: %u, err: %d\n",
+				sync_id, type, rc);
+			continue;
+		}
+
+		/*
+		 * release the memory allocated for external data.
+		 * It is safe to release this memory
+		 * only if deregistration is successful.
+		 */
+		kfree(data);
+	}
+
+	mutex_destroy(&synx_obj->obj_lock);
+	synx_util_release_fence_entry((u64)synx_obj->fence);
+	dma_fence_put(synx_obj->fence);
+	kfree(synx_obj);
+	dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
+}
+
+long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size)
+{
+	bool bit;
+	long idx;
+
+	do {
+		idx = find_first_zero_bit(bitmap, size);
+		if (idx >= size)
+			break;
+		bit = test_and_set_bit(idx, bitmap);
+	} while (bit);
+
+	return idx;
+}
+
+u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx)
+{
+	u32 handle = 0;
+
+	if (idx >= SYNX_MAX_OBJS)
+		return 0;
+
+	if (global_idx) {
+		handle = 1;
+		handle <<= SYNX_HANDLE_CORE_BITS;
+	}
+
+	handle |= core_id;
+	handle <<= SYNX_HANDLE_INDEX_BITS;
+	handle |= idx;
+
+	return handle;
+}
+
+int synx_alloc_global_handle(u32 *new_synx)
+{
+	int rc;
+	u32 idx;
+
+	rc = synx_global_alloc_index(&idx);
+	if (rc != SYNX_SUCCESS)
+		return rc;
+
+	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true);
+	dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n",
+		*new_synx, *new_synx);
+
+	rc = synx_global_init_coredata(*new_synx);
+	return rc;
+}
+
+int synx_alloc_local_handle(u32 *new_synx)
+{
+	u32 idx;
+
+	idx = synx_util_get_free_handle(synx_dev->native->bitmap,
+		SYNX_MAX_OBJS);
+	if (idx >= SYNX_MAX_OBJS)
+		return -SYNX_NOMEM;
+
+	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false);
+	dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n",
+		*new_synx, *new_synx);
+
+	return SYNX_SUCCESS;
+}
+
+int synx_util_init_handle(struct synx_client *client,
+	struct synx_coredata *synx_obj, u32 *new_h_synx,
+	void *map_entry)
+{
+	int rc = SYNX_SUCCESS;
+	bool found = false;
+	struct synx_handle_coredata *synx_data, *curr;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) ||
+		IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry))
+		return -SYNX_INVALID;
+
+	synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC);
+	if (IS_ERR_OR_NULL(synx_data))
+		return -SYNX_NOMEM;
+
+	synx_data->client = client;
+	synx_data->synx_obj = synx_obj;
+	synx_data->key = *new_h_synx;
+	synx_data->map_entry = map_entry;
+	kref_init(&synx_data->refcount);
+	synx_data->rel_count = 1;
+
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+		curr, node, *new_h_synx) {
+		if (curr->key == *new_h_synx) {
+			if (curr->synx_obj != synx_obj) {
+				rc = -SYNX_INVALID;
+				dprintk(SYNX_ERR,
+					"inconsistent data in handle map\n");
+			} else {
+				kref_get(&curr->refcount);
+				curr->rel_count++;
+			}
+			found = true;
+			break;
+		}
+	}
+	if (unlikely(found))
+		kfree(synx_data);
+	else
+		hash_add(client->handle_map,
+			&synx_data->node, *new_h_synx);
+	spin_unlock_bh(&client->handle_map_lock);
+
+	return rc;
+}
+
+int synx_util_activate(struct synx_coredata *synx_obj)
+{
+	if (IS_ERR_OR_NULL(synx_obj))
+		return -SYNX_INVALID;
+
+	/* move synx to ACTIVE state and register cb for merged object */
+	dma_fence_enable_sw_signaling(synx_obj->fence);
+	return 0;
+}
+
+static u32 synx_util_get_references(struct synx_coredata *synx_obj)
+{
+	u32 count = 0;
+	u32 i = 0;
+	struct dma_fence_array *array = NULL;
+
+	/* obtain dma fence reference */
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return 0;
+
+		for (i = 0; i < array->num_fences; i++)
+			dma_fence_get(array->fences[i]);
+		count = array->num_fences;
+	} else {
+		dma_fence_get(synx_obj->fence);
+		count = 1;
+	}
+
+	return count;
+}
+
+static void synx_util_put_references(struct synx_coredata *synx_obj)
+{
+	u32 i = 0;
+	struct dma_fence_array *array = NULL;
+
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return;
+
+		for (i = 0; i < array->num_fences; i++)
+			dma_fence_put(array->fences[i]);
+	} else {
+		dma_fence_put(synx_obj->fence);
+	}
+}
+
+static u32 synx_util_add_fence(struct synx_coredata *synx_obj,
+	struct dma_fence **fences,
+	u32 idx)
+{
+	struct dma_fence_array *array = NULL;
+	u32 i = 0;
+
+	if (dma_fence_is_array(synx_obj->fence)) {
+		array = to_dma_fence_array(synx_obj->fence);
+		if (IS_ERR_OR_NULL(array))
+			return 0;
+
+		for (i = 0; i < array->num_fences; i++)
+			fences[idx+i] = array->fences[i];
+
+		return array->num_fences;
+	}
+
+	fences[idx] = synx_obj->fence;
+	return 1;
+}
+
+static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num)
+{
+	int i, j;
+	u32 wr_idx = 1;
+
+	if (IS_ERR_OR_NULL(arr)) {
+		dprintk(SYNX_ERR, "invalid input array\n");
+		return 0;
+	}
+
+	for (i = 1; i < num; i++) {
+		for (j = 0; j < wr_idx ; j++) {
+			if (arr[i] == arr[j]) {
+				/* release reference obtained for duplicate */
+				dprintk(SYNX_DBG,
+					"releasing duplicate reference\n");
+				dma_fence_put(arr[i]);
+				break;
+			}
+		}
+		if (j == wr_idx)
+			arr[wr_idx++] = arr[i];
+	}
+
+	return wr_idx;
+}
+
+s32 synx_util_merge_error(struct synx_client *client,
+	u32 *h_synxs,
+	u32 num_objs)
+{
+	u32 i = 0;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs))
+		return -SYNX_INVALID;
+
+	for (i = 0; i < num_objs; i++) {
+		synx_data = synx_util_acquire_handle(client, h_synxs[i]);
+		synx_obj = synx_util_obtain_object(synx_data);
+		if (IS_ERR_OR_NULL(synx_obj) ||
+			IS_ERR_OR_NULL(synx_obj->fence)) {
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid handle %d in cleanup\n",
+				client->id, h_synxs[i]);
+			continue;
+		}
+		/* release all references obtained during merge validation */
+		synx_util_put_references(synx_obj);
+		synx_util_release_handle(synx_data);
+	}
+
+	return 0;
+}
+
+int synx_util_validate_merge(struct synx_client *client,
+	u32 *h_synxs,
+	u32 num_objs,
+	struct dma_fence ***fence_list,
+	u32 *fence_cnt)
+{
+	u32 count = 0;
+	u32 i = 0;
+	struct synx_handle_coredata **synx_datas;
+	struct synx_coredata **synx_objs;
+	struct dma_fence **fences = NULL;
+
+	if (num_objs <= 1) {
+		dprintk(SYNX_ERR, "single handle merge is not allowed\n");
+		return -SYNX_INVALID;
+	}
+
+	synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_datas))
+		return -SYNX_NOMEM;
+
+	synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(synx_objs)) {
+		kfree(synx_datas);
+		return -SYNX_NOMEM;
+	}
+
+	for (i = 0; i < num_objs; i++) {
+		synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]);
+		synx_objs[i] = synx_util_obtain_object(synx_datas[i]);
+		if (IS_ERR_OR_NULL(synx_objs[i]) ||
+			IS_ERR_OR_NULL(synx_objs[i]->fence)) {
+			dprintk(SYNX_ERR,
+				"[sess :%llu] invalid handle %d in merge list\n",
+				client->id, h_synxs[i]);
+			*fence_cnt = i;
+			goto error;
+		}
+		count += synx_util_get_references(synx_objs[i]);
+	}
+
+	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(fences)) {
+		*fence_cnt = num_objs;
+		goto error;
+	}
+
+	/* memory will be released later in the invoking function */
+	*fence_list = fences;
+	count = 0;
+
+	for (i = 0; i < num_objs; i++) {
+		count += synx_util_add_fence(synx_objs[i], fences, count);
+		/* release the reference obtained earlier in the function */
+		synx_util_release_handle(synx_datas[i]);
+	}
+
+	*fence_cnt = synx_util_remove_duplicates(fences, count);
+	kfree(synx_objs);
+	kfree(synx_datas);
+	return 0;
+
+error:
+	/* release the reference/s obtained earlier in the function */
+	for (i = 0; i < *fence_cnt; i++) {
+		synx_util_put_references(synx_objs[i]);
+		synx_util_release_handle(synx_datas[i]);
+	}
+	*fence_cnt = 0;
+	kfree(synx_objs);
+	kfree(synx_datas);
+	return -SYNX_INVALID;
+}
+
+static u32 __fence_state(struct dma_fence *fence, bool locked)
+{
+	s32 status;
+	u32 state = SYNX_STATE_INVALID;
+
+	if (IS_ERR_OR_NULL(fence)) {
+		dprintk(SYNX_ERR, "invalid fence\n");
+		return SYNX_STATE_INVALID;
+	}
+
+	if (locked)
+		status = dma_fence_get_status_locked(fence);
+	else
+		status = dma_fence_get_status(fence);
+
+	/* convert fence status to synx state */
+	switch (status) {
+	case 0:
+		state = SYNX_STATE_ACTIVE;
+		break;
+	case 1:
+		state = SYNX_STATE_SIGNALED_SUCCESS;
+		break;
+	case -SYNX_STATE_SIGNALED_CANCEL:
+		state = SYNX_STATE_SIGNALED_CANCEL;
+		break;
+	case -SYNX_STATE_SIGNALED_EXTERNAL:
+		state = SYNX_STATE_SIGNALED_EXTERNAL;
+		break;
+	case -SYNX_STATE_SIGNALED_ERROR:
+		state = SYNX_STATE_SIGNALED_ERROR;
+		break;
+	default:
+		state = (u32)(-status);
+	}
+
+	return state;
+}
+
+static u32 __fence_group_state(struct dma_fence *fence, bool locked)
+{
+	u32 i = 0;
+	u32 state = SYNX_STATE_INVALID;
+	struct dma_fence_array *array = NULL;
+	u32 intr, actv_cnt, sig_cnt, err_cnt;
+
+	if (IS_ERR_OR_NULL(fence)) {
+		dprintk(SYNX_ERR, "invalid fence\n");
+		return SYNX_STATE_INVALID;
+	}
+
+	actv_cnt = sig_cnt = err_cnt = 0;
+	array = to_dma_fence_array(fence);
+	if (IS_ERR_OR_NULL(array))
+		return SYNX_STATE_INVALID;
+
+	for (i = 0; i < array->num_fences; i++) {
+		intr = __fence_state(array->fences[i], locked);
+		switch (intr) {
+		case SYNX_STATE_ACTIVE:
+			actv_cnt++;
+			break;
+		case SYNX_STATE_SIGNALED_SUCCESS:
+			sig_cnt++;
+			break;
+		default:
+			err_cnt++;
+		}
+	}
+
+	dprintk(SYNX_DBG,
+		"group cnt stats act:%u, sig: %u, err: %u\n",
+		actv_cnt, sig_cnt, err_cnt);
+
+	if (err_cnt)
+		state = SYNX_STATE_SIGNALED_ERROR;
+	else if (actv_cnt)
+		state = SYNX_STATE_ACTIVE;
+	else if (sig_cnt == array->num_fences)
+		state = SYNX_STATE_SIGNALED_SUCCESS;
+
+	return state;
+}
+
+/*
+ * WARN: Should not hold the fence spinlock when invoking
+ * this function. Use synx_util_get_object_status_locked instead.
+ */
+u32 synx_util_get_object_status(struct synx_coredata *synx_obj)
+{
+	u32 state;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return SYNX_STATE_INVALID;
+
+	if (synx_util_is_merged_object(synx_obj))
+		state = __fence_group_state(synx_obj->fence, false);
+	else
+		state = __fence_state(synx_obj->fence, false);
+
+	return state;
+}
+
+/* use this for status check when holding the fence spinlock */
+u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj)
+{
+	u32 state;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		return SYNX_STATE_INVALID;
+
+	if (synx_util_is_merged_object(synx_obj))
+		state = __fence_group_state(synx_obj->fence, true);
+	else
+		state = __fence_state(synx_obj->fence, true);
+
+	return state;
+}
+
+struct synx_handle_coredata *synx_util_acquire_handle(
+	struct synx_client *client, u32 h_synx)
+{
+	struct synx_handle_coredata *synx_data = NULL;
+	struct synx_handle_coredata *synx_handle =
+		ERR_PTR(-SYNX_NOENT);
+
+	if (IS_ERR_OR_NULL(client))
+		return ERR_PTR(-SYNX_INVALID);
+
+	spin_lock_bh(&client->handle_map_lock);
+	hash_for_each_possible(client->handle_map,
+		synx_data, node, h_synx) {
+		if (synx_data->key == h_synx &&
+			synx_data->rel_count != 0) {
+			kref_get(&synx_data->refcount);
+			synx_handle = synx_data;
+			break;
+		}
+	}
+	spin_unlock_bh(&client->handle_map_lock);
+
+	return synx_handle;
+}
+
+struct synx_map_entry *synx_util_insert_to_map(
+	struct synx_coredata *synx_obj,
+	u32 h_synx, u32 flags)
+{
+	struct synx_map_entry *map_entry;
+
+	map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(map_entry))
+		return ERR_PTR(-SYNX_NOMEM);
+
+	kref_init(&map_entry->refcount);
+	map_entry->synx_obj = synx_obj;
+	map_entry->flags = flags;
+	map_entry->key = h_synx;
+
+	if (synx_util_is_global_handle(h_synx)) {
+		spin_lock_bh(&synx_dev->native->global_map_lock);
+		hash_add(synx_dev->native->global_map,
+			&map_entry->node, h_synx);
+		spin_unlock_bh(&synx_dev->native->global_map_lock);
+		dprintk(SYNX_MEM,
+			"added handle %u to global map %pK\n",
+			h_synx, map_entry);
+	} else {
+		spin_lock_bh(&synx_dev->native->local_map_lock);
+		hash_add(synx_dev->native->local_map,
+			&map_entry->node, h_synx);
+		spin_unlock_bh(&synx_dev->native->local_map_lock);
+		dprintk(SYNX_MEM,
+			"added handle %u to local map %pK\n",
+			h_synx, map_entry);
+	}
+
+	return map_entry;
+}
+
+struct synx_map_entry *synx_util_get_map_entry(u32 h_synx)
+{
+	struct synx_map_entry *curr;
+	struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT);
+
+	if (h_synx == 0)
+		return ERR_PTR(-SYNX_INVALID);
+
+	if (synx_util_is_global_handle(h_synx)) {
+		spin_lock_bh(&synx_dev->native->global_map_lock);
+		hash_for_each_possible(synx_dev->native->global_map,
+			curr, node, h_synx) {
+			if (curr->key == h_synx) {
+				kref_get(&curr->refcount);
+				map_entry = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->global_map_lock);
+	} else {
+		spin_lock_bh(&synx_dev->native->local_map_lock);
+		hash_for_each_possible(synx_dev->native->local_map,
+			curr, node, h_synx) {
+			if (curr->key == h_synx) {
+				kref_get(&curr->refcount);
+				map_entry = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->local_map_lock);
+	}
+
+	/* should we allocate if entry not found? */
+	return map_entry;
+}
+
+static void synx_util_cleanup_fence(
+	struct synx_coredata *synx_obj)
+{
+	struct synx_signal_cb *signal_cb;
+	unsigned long flags;
+	u32 g_status;
+	u32 f_status;
+
+	mutex_lock(&synx_obj->obj_lock);
+	synx_obj->map_count--;
+	signal_cb = synx_obj->signal_cb;
+	f_status = synx_util_get_object_status(synx_obj);
+	dprintk(SYNX_VERB, "f_status:%u, signal_cb:%p, map:%u, idx:%u\n",
+		f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx);
+	if (synx_obj->map_count == 0 &&
+		(signal_cb != NULL) &&
+		(synx_obj->global_idx != 0) &&
+		(f_status == SYNX_STATE_ACTIVE)) {
+		/*
+		 * no more clients on the local core are interested in
+		 * notifications for this handle.
+		 * remove the reference held by the callback on the synx
+		 * coredata structure and update the cb (if still
+		 * un-signaled) with the global handle idx, so that any
+		 * cross-core clients waiting on the handle still get
+		 * notified.
+		 */
+		g_status = synx_global_get_status(synx_obj->global_idx);
+		if (g_status > SYNX_STATE_ACTIVE) {
+			dprintk(SYNX_DBG, "signaling fence %pK with status %u\n",
+				synx_obj->fence, g_status);
+			synx_native_signal_fence(synx_obj, g_status);
+		} else {
+			spin_lock_irqsave(synx_obj->fence->lock, flags);
+			if (synx_util_get_object_status_locked(synx_obj) ==
+				SYNX_STATE_ACTIVE) {
+				signal_cb->synx_obj = NULL;
+				signal_cb->handle = synx_obj->global_idx;
+				synx_obj->signal_cb =  NULL;
+				/*
+				 * release reference held by signal cb and
+				 * get reference on global index instead.
+				 */
+				synx_util_put_object(synx_obj);
+				synx_global_get_ref(synx_obj->global_idx);
+			}
+			spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+		}
+	} else if (synx_obj->map_count == 0 && signal_cb &&
+		(f_status == SYNX_STATE_ACTIVE)) {
+		if (dma_fence_remove_callback(synx_obj->fence,
+			&signal_cb->fence_cb)) {
+			dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
+				signal_cb);
+			kfree(signal_cb);
+			synx_obj->signal_cb = NULL;
+			/*
+			 * release reference held by signal cb and
+			 * get reference on global index instead.
+			 */
+			synx_util_put_object(synx_obj);
+		}
+	}
+	mutex_unlock(&synx_obj->obj_lock);
+}
+
+static void synx_util_destroy_map_entry_worker(
+	struct work_struct *dispatch)
+{
+	struct synx_map_entry *map_entry =
+		container_of(dispatch, struct synx_map_entry, dispatch);
+	struct synx_coredata *synx_obj;
+
+	synx_obj = map_entry->synx_obj;
+	if (!IS_ERR_OR_NULL(synx_obj)) {
+		synx_util_cleanup_fence(synx_obj);
+		/* release reference held by map entry */
+		synx_util_put_object(synx_obj);
+	}
+
+	if (!synx_util_is_global_handle(map_entry->key))
+		clear_bit(synx_util_global_idx(map_entry->key),
+			synx_dev->native->bitmap);
+	dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n",
+		map_entry->key, map_entry);
+	kfree(map_entry);
+}
+
+static void synx_util_destroy_map_entry(struct kref *kref)
+{
+	struct synx_map_entry *map_entry =
+		container_of(kref, struct synx_map_entry, refcount);
+
+	hash_del(&map_entry->node);
+	dprintk(SYNX_MEM, "map entry for %u removed %pK\n",
+		map_entry->key, map_entry);
+	INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker);
+	queue_work(synx_dev->wq_cleanup, &map_entry->dispatch);
+}
+
+void synx_util_release_map_entry(struct synx_map_entry *map_entry)
+{
+	spinlock_t *lock;
+
+	if (IS_ERR_OR_NULL(map_entry))
+		return;
+
+	if (synx_util_is_global_handle(map_entry->key))
+		lock = &synx_dev->native->global_map_lock;
+	else
+		lock = &synx_dev->native->local_map_lock;
+
+	spin_lock_bh(lock);
+	kref_put(&map_entry->refcount,
+		synx_util_destroy_map_entry);
+	spin_unlock_bh(lock);
+}
+
+static void synx_util_destroy_handle_worker(
+	struct work_struct *dispatch)
+{
+	struct synx_handle_coredata *synx_data =
+		container_of(dispatch, struct synx_handle_coredata,
+		dispatch);
+
+	synx_util_release_map_entry(synx_data->map_entry);
+	dprintk(SYNX_VERB, "handle %u destroyed %pK\n",
+		synx_data->key, synx_data);
+	kfree(synx_data);
+}
+
+static void synx_util_destroy_handle(struct kref *kref)
+{
+	struct synx_handle_coredata *synx_data =
+		container_of(kref, struct synx_handle_coredata,
+		refcount);
+
+	hash_del(&synx_data->node);
+	dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n",
+		synx_data->client->id, synx_data->key, synx_data);
+	INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker);
+	queue_work(synx_dev->wq_cleanup, &synx_data->dispatch);
+}
+
+void synx_util_release_handle(struct synx_handle_coredata *synx_data)
+{
+	struct synx_client *client;
+
+	if (IS_ERR_OR_NULL(synx_data))
+		return;
+
+	client = synx_data->client;
+	if (IS_ERR_OR_NULL(client))
+		return;
+
+	spin_lock_bh(&client->handle_map_lock);
+	kref_put(&synx_data->refcount,
+		synx_util_destroy_handle);
+	spin_unlock_bh(&client->handle_map_lock);
+}
+
+struct bind_operations *synx_util_get_bind_ops(u32 type)
+{
+	struct synx_registered_ops *client_ops;
+
+	if (!synx_util_is_valid_bind_type(type))
+		return NULL;
+
+	mutex_lock(&synx_dev->vtbl_lock);
+	client_ops = &synx_dev->bind_vtbl[type];
+	if (!client_ops->valid) {
+		mutex_unlock(&synx_dev->vtbl_lock);
+		return NULL;
+	}
+	mutex_unlock(&synx_dev->vtbl_lock);
+
+	return &client_ops->ops;
+}
+
+int synx_util_alloc_cb_entry(struct synx_client *client,
+	struct synx_kernel_payload *data,
+	u32 *cb_idx)
+{
+	long idx;
+	struct synx_client_cb *cb;
+
+	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) ||
+		IS_ERR_OR_NULL(cb_idx))
+		return -SYNX_INVALID;
+
+	idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
+	if (idx >= SYNX_MAX_OBJS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] free cb index not available\n",
+			client->id);
+		return -SYNX_NOMEM;
+	}
+
+	cb = &client->cb_table[idx];
+	memset(cb, 0, sizeof(*cb));
+	cb->is_valid = true;
+	cb->client = client;
+	cb->idx = idx;
+	memcpy(&cb->kernel_cb, data,
+		sizeof(cb->kernel_cb));
+
+	*cb_idx = idx;
+	dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n",
+		client->id, *cb_idx);
+	return 0;
+}
+
+int synx_util_clear_cb_entry(struct synx_client *client,
+	struct synx_client_cb *cb)
+{
+	int rc = 0;
+	u32 idx;
+
+	if (IS_ERR_OR_NULL(cb))
+		return -SYNX_INVALID;
+
+	idx = cb->idx;
+	memset(cb, 0, sizeof(*cb));
+	if (idx && idx < SYNX_MAX_OBJS) {
+		clear_bit(idx, client->cb_bitmap);
+	} else {
+		dprintk(SYNX_ERR, "invalid index\n");
+		rc = -SYNX_INVALID;
+	}
+
+	return rc;
+}
+
+void synx_util_default_user_callback(u32 h_synx,
+	int status, void *data)
+{
+	struct synx_client_cb *cb = data;
+	struct synx_client *client = NULL;
+
+	if (cb && cb->client) {
+		client = cb->client;
+		dprintk(SYNX_VERB,
+			"[sess :%llu] user cb queued for handle %d\n",
+			client->id, h_synx);
+		cb->kernel_cb.status = status;
+		mutex_lock(&client->event_q_lock);
+		list_add_tail(&cb->node, &client->event_q);
+		mutex_unlock(&client->event_q_lock);
+		wake_up_all(&client->event_wq);
+	} else {
+		dprintk(SYNX_ERR, "invalid params\n");
+	}
+}
+
+void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
+{
+	struct synx_cb_data *synx_cb, *synx_cb_temp;
+
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR, "invalid arguments\n");
+		return;
+	}
+
+	list_for_each_entry_safe(synx_cb,
+		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
+		synx_cb->status = status;
+		list_del_init(&synx_cb->node);
+		queue_work(synx_dev->wq_cb,
+			&synx_cb->cb_dispatch);
+		dprintk(SYNX_VERB, "dispatched callback\n");
+	}
+}
+
+void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
+{
+	struct synx_cb_data *synx_cb =
+		container_of(cb_dispatch, struct synx_cb_data, cb_dispatch);
+	struct synx_client *client;
+	struct synx_client_cb *cb;
+	struct synx_kernel_payload payload;
+	u32 status;
+
+	client = synx_get_client(synx_cb->session);
+	if (IS_ERR_OR_NULL(client)) {
+		dprintk(SYNX_ERR,
+			"invalid session data %pK in cb payload\n",
+			synx_cb->session);
+		goto free;
+	}
+
+	if (synx_cb->idx == 0 ||
+		synx_cb->idx >= SYNX_MAX_OBJS) {
+		dprintk(SYNX_ERR,
+			"[sess :%llu] invalid cb index %u\n",
+			client->id, synx_cb->idx);
+		goto fail;
+	}
+
+	status = synx_cb->status;
+	cb = &client->cb_table[synx_cb->idx];
+	if (!cb->is_valid) {
+		dprintk(SYNX_ERR, "invalid cb payload\n");
+		goto fail;
+	}
+
+	memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb));
+	payload.status = status;
+
+	if (payload.cb_func == synx_util_default_user_callback) {
+		/*
+		 * need to send client cb data for default
+		 * user cb (userspace cb)
+		 */
+		payload.data = cb;
+	} else {
+		/*
+		 * clear the cb entry. userspace cb entry
+		 * will be cleared after data read by the
+		 * polling thread or when client is destroyed
+		 */
+		if (synx_util_clear_cb_entry(client, cb))
+			dprintk(SYNX_ERR,
+				"[sess :%llu] error clearing cb entry\n",
+				client->id);
+	}
+
+	dprintk(SYNX_INFO,
+		"callback dispatched for handle %u, status %u, data %pK\n",
+		payload.h_synx, payload.status, payload.data);
+
+	/* dispatch kernel callback */
+	payload.cb_func(payload.h_synx,
+		payload.status, payload.data);
+
+fail:
+	synx_put_client(client);
+free:
+	kfree(synx_cb);
+}
+
+u32 synx_util_get_fence_entry(u64 key, u32 global)
+{
+	u32 h_synx = 0;
+	struct synx_fence_entry *curr;
+
+	spin_lock_bh(&synx_dev->native->fence_map_lock);
+	hash_for_each_possible(synx_dev->native->fence_map,
+		curr, node, key) {
+		if (curr->key == key) {
+			if (global)
+				h_synx = curr->g_handle;
+			/* return local handle if global not available */
+			if (h_synx == 0)
+				h_synx = curr->l_handle;
+
+			break;
+		}
+	}
+	spin_unlock_bh(&synx_dev->native->fence_map_lock);
+
+	return h_synx;
+}
+
+void synx_util_release_fence_entry(u64 key)
+{
+	struct synx_fence_entry *entry = NULL, *curr;
+
+	spin_lock_bh(&synx_dev->native->fence_map_lock);
+	hash_for_each_possible(synx_dev->native->fence_map,
+		curr, node, key) {
+		if (curr->key == key) {
+			entry = curr;
+			break;
+		}
+	}
+
+	if (entry) {
+		hash_del(&entry->node);
+		dprintk(SYNX_MEM,
+			"released fence entry %pK for fence %pK\n",
+			entry, (void *)key);
+		kfree(entry);
+	}
+
+	spin_unlock_bh(&synx_dev->native->fence_map_lock);
+}
+
+int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
+	u32 *h_synx, u32 global)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_fence_entry *curr;
+
+	if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx))
+		return -SYNX_INVALID;
+
+	spin_lock_bh(&synx_dev->native->fence_map_lock);
+	hash_for_each_possible(synx_dev->native->fence_map,
+		curr, node, entry->key) {
+		/* raced with import from another process on same fence */
+		if (curr->key == entry->key) {
+			if (global)
+				*h_synx = curr->g_handle;
+
+			if (*h_synx == 0 || !global)
+				*h_synx = curr->l_handle;
+
+			rc = -SYNX_ALREADY;
+			break;
+		}
+	}
+	/* add entry only if it is not already present in the map */
+	if (rc == SYNX_SUCCESS) {
+		hash_add(synx_dev->native->fence_map,
+			&entry->node, entry->key);
+		dprintk(SYNX_MEM,
+			"added fence entry %pK for fence %pK\n",
+			entry, (void *)entry->key);
+	}
+	spin_unlock_bh(&synx_dev->native->fence_map_lock);
+
+	return rc;
+}
+
+struct synx_client *synx_get_client(struct synx_session *session)
+{
+	struct synx_client *client = NULL;
+	struct synx_client *curr;
+
+	if (IS_ERR_OR_NULL(session))
+		return ERR_PTR(-SYNX_INVALID);
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	hash_for_each_possible(synx_dev->native->client_metadata_map,
+		curr, node, (u64)session) {
+		if (curr == (struct synx_client *)session) {
+			if (curr->active) {
+				kref_get(&curr->refcount);
+				client = curr;
+			}
+			break;
+		}
+	}
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+
+	return client;
+}
+
+static void synx_client_cleanup(struct work_struct *dispatch)
+{
+	int i, j;
+	struct synx_client *client =
+		container_of(dispatch, struct synx_client, dispatch);
+	struct synx_handle_coredata *curr;
+	struct hlist_node *tmp;
+
+	/*
+	 * go over all the remaining synx obj handles
+	 * un-released from this session and remove them.
+	 */
+	hash_for_each_safe(client->handle_map, i, tmp, curr, node) {
+		dprintk(SYNX_WARN,
+			"[sess :%llu] un-released handle %u\n",
+			client->id, curr->key);
+		j = kref_read(&curr->refcount);
+		/* release pending reference */
+		while (j--)
+			kref_put(&curr->refcount, synx_util_destroy_handle);
+	}
+
+	mutex_destroy(&client->event_q_lock);
+
+	dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n",
+		client->id, client->name, client);
+	vfree(client);
+}
+
+static void synx_client_destroy(struct kref *kref)
+{
+	struct synx_client *client =
+		container_of(kref, struct synx_client, refcount);
+
+	hash_del(&client->node);
+	dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n",
+		client->id, client->name);
+
+	INIT_WORK(&client->dispatch, synx_client_cleanup);
+	queue_work(synx_dev->wq_cleanup, &client->dispatch);
+}
+
+void synx_put_client(struct synx_client *client)
+{
+	if (IS_ERR_OR_NULL(client))
+		return;
+
+	spin_lock_bh(&synx_dev->native->metadata_map_lock);
+	kref_put(&client->refcount, synx_client_destroy);
+	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
+}
+
+void synx_util_generate_timestamp(char *timestamp, size_t size)
+{
+	struct timespec64 tv;
+	struct tm tm;
+
+	ktime_get_real_ts64(&tv);
+	time64_to_tm(tv.tv_sec, 0, &tm);
+	snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d",
+		tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
+		tm.tm_min, tm.tm_sec);
+}
+
+void synx_util_log_error(u32 client_id, u32 h_synx, s32 err)
+{
+	struct error_node *err_node;
+
+	if (!synx_dev->debugfs_root)
+		return;
+
+	err_node = kzalloc(sizeof(*err_node), GFP_KERNEL);
+	if (!err_node)
+		return;
+
+	err_node->client_id = client_id;
+	err_node->error_code = err;
+	err_node->h_synx = h_synx;
+	synx_util_generate_timestamp(err_node->timestamp,
+		sizeof(err_node->timestamp));
+	mutex_lock(&synx_dev->error_lock);
+	list_add(&err_node->node,
+		&synx_dev->error_list);
+	mutex_unlock(&synx_dev->error_lock);
+}
+
+int synx_util_save_data(void *fence, u32 flags,
+	u32 h_synx)
+{
+	int rc = SYNX_SUCCESS;
+	struct synx_entry_64 *entry, *curr;
+	u64 key;
+	u32 tbl = synx_util_map_params_to_type(flags);
+
+	switch (tbl) {
+	case SYNX_TYPE_CSL:
+		key = *(u32 *)fence;
+		spin_lock_bh(&synx_dev->native->csl_map_lock);
+		/* ensure fence is not already added to map */
+		hash_for_each_possible(synx_dev->native->csl_fence_map,
+			curr, node, key) {
+			if (curr->key == key) {
+				rc = -SYNX_ALREADY;
+				break;
+			}
+		}
+		if (rc == SYNX_SUCCESS) {
+			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+			if (entry) {
+				entry->data[0] = h_synx;
+				entry->key = key;
+				kref_init(&entry->refcount);
+				hash_add(synx_dev->native->csl_fence_map,
+					&entry->node, entry->key);
+				dprintk(SYNX_MEM, "added csl fence %d to map %pK\n",
+					entry->key, entry);
+			} else {
+				rc = -SYNX_NOMEM;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->csl_map_lock);
+		break;
+	default:
+		dprintk(SYNX_ERR, "invalid hash table selection\n");
+		rc = -SYNX_INVALID;
+	}
+
+	return rc;
+}
+
+struct synx_entry_64 *synx_util_retrieve_data(void *fence,
+	u32 type)
+{
+	u64 key;
+	struct synx_entry_64 *entry = NULL;
+	struct synx_entry_64 *curr;
+
+	switch (type) {
+	case SYNX_TYPE_CSL:
+		key = *(u32 *)fence;
+		spin_lock_bh(&synx_dev->native->csl_map_lock);
+		hash_for_each_possible(synx_dev->native->csl_fence_map,
+			curr, node, key) {
+			if (curr->key == key) {
+				kref_get(&curr->refcount);
+				entry = curr;
+				break;
+			}
+		}
+		spin_unlock_bh(&synx_dev->native->csl_map_lock);
+		break;
+	default:
+		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
+			type);
+	}
+
+	return entry;
+}
+
+static void synx_util_destroy_data(struct kref *kref)
+{
+	struct synx_entry_64 *entry =
+		container_of(kref, struct synx_entry_64, refcount);
+
+	hash_del(&entry->node);
+	dprintk(SYNX_MEM, "released fence %llu entry %pK\n",
+		entry->key, entry);
+	kfree(entry);
+}
+
+void synx_util_remove_data(void *fence,
+	u32 type)
+{
+	u64 key;
+	struct synx_entry_64 *entry = NULL;
+	struct synx_entry_64 *curr;
+
+	if (IS_ERR_OR_NULL(fence))
+		return;
+
+	switch (type) {
+	case SYNX_TYPE_CSL:
+		key = *((u32 *)fence);
+		spin_lock_bh(&synx_dev->native->csl_map_lock);
+		hash_for_each_possible(synx_dev->native->csl_fence_map,
+			curr, node, key) {
+			if (curr->key == key) {
+				entry = curr;
+				break;
+			}
+		}
+		if (entry)
+			kref_put(&entry->refcount, synx_util_destroy_data);
+		spin_unlock_bh(&synx_dev->native->csl_map_lock);
+		break;
+	default:
+		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
+			type);
+	}
+}
+
+void synx_util_map_import_params_to_create(
+	struct synx_import_indv_params *params,
+	struct synx_create_params *c_params)
+{
+	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params))
+		return;
+
+	if (params->flags & SYNX_IMPORT_GLOBAL_FENCE)
+		c_params->flags |= SYNX_CREATE_GLOBAL_FENCE;
+
+	if (params->flags & SYNX_IMPORT_LOCAL_FENCE)
+		c_params->flags |= SYNX_CREATE_LOCAL_FENCE;
+
+	if (params->flags & SYNX_IMPORT_DMA_FENCE)
+		c_params->flags |= SYNX_CREATE_DMA_FENCE;
+}
+
+u32 synx_util_map_client_id_to_core(
+	enum synx_client_id id)
+{
+	u32 core_id;
+
+	switch (id) {
+	case SYNX_CLIENT_NATIVE:
+		core_id = SYNX_CORE_APSS; break;
+	case SYNX_CLIENT_EVA_CTX0:
+		core_id = SYNX_CORE_EVA; break;
+	case SYNX_CLIENT_VID_CTX0:
+		core_id = SYNX_CORE_IRIS; break;
+	case SYNX_CLIENT_NSP_CTX0:
+		core_id = SYNX_CORE_NSP; break;
+	default:
+		core_id = SYNX_CORE_MAX;
+	}
+
+	return core_id;
+}
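
The local handle allocator above relies on the classic lock-free bitmap pattern: find_first_zero_bit() proposes an index and test_and_set_bit() claims it, retrying if another caller wins the race; the owner later frees the slot with clear_bit(). A minimal caller sketch under that assumption (SKETCH_MAX and sketch_bitmap are placeholder names, not driver symbols):

#define SKETCH_MAX 64
static DECLARE_BITMAP(sketch_bitmap, SKETCH_MAX);

static long sketch_claim_slot(void)
{
	long idx = synx_util_get_free_handle(sketch_bitmap, SKETCH_MAX);

	if (idx >= SKETCH_MAX)
		return -SYNX_NOMEM;	/* bitmap exhausted */

	/* ... use the slot, then release it ... */
	clear_bit(idx, sketch_bitmap);
	return idx;
}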

+ 181 - 0
msm/synx/synx_util.h

@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SYNX_UTIL_H__
+#define __SYNX_UTIL_H__
+
+#include "synx_api.h"
+#include "synx_private.h"
+
+extern struct synx_device *synx_dev;
+
+extern void synx_fence_callback(struct dma_fence *fence,
+	struct dma_fence_cb *cb);
+extern int synx_native_signal_fence(struct synx_coredata *synx_obj,
+	u32 status);
+
+static inline bool synx_util_is_valid_bind_type(u32 type)
+{
+	if (type < SYNX_MAX_BIND_TYPES)
+		return true;
+
+	return false;
+}
+
+static inline bool synx_util_is_global_handle(u32 h_synx)
+{
+	return (h_synx & SYNX_OBJ_GLOBAL_FLAG_MASK) ? true : false;
+}
+
+static inline u32 synx_util_get_object_type(
+	struct synx_coredata *synx_obj)
+{
+	return synx_obj ? synx_obj->type : 0;
+}
+
+static inline bool synx_util_is_merged_object(
+	struct synx_coredata *synx_obj)
+{
+	if (synx_obj &&
+		(synx_obj->type & SYNX_CREATE_MERGED_FENCE))
+		return true;
+
+	return false;
+}
+
+static inline bool synx_util_is_global_object(
+	struct synx_coredata *synx_obj)
+{
+	if (synx_obj &&
+		(synx_obj->type & SYNX_CREATE_GLOBAL_FENCE))
+		return true;
+
+	return false;
+}
+
+static inline bool synx_util_is_external_object(
+	struct synx_coredata *synx_obj)
+{
+	if (synx_obj &&
+		(synx_obj->type & SYNX_CREATE_DMA_FENCE))
+		return true;
+
+	return false;
+}
+
+static inline u32 synx_util_map_params_to_type(u32 flags)
+{
+	if (flags & SYNX_CREATE_CSL_FENCE)
+		return SYNX_TYPE_CSL;
+
+	return SYNX_MAX_BIND_TYPES;
+}
+
+static inline u32 synx_util_global_idx(u32 h_synx)
+{
+	return (h_synx & SYNX_OBJ_HANDLE_MASK);
+}
+
+/* coredata memory functions */
+void synx_util_get_object(struct synx_coredata *synx_obj);
+void synx_util_put_object(struct synx_coredata *synx_obj);
+void synx_util_object_destroy(struct synx_coredata *synx_obj);
+
+static inline struct synx_coredata *synx_util_obtain_object(
+	struct synx_handle_coredata *synx_data)
+{
+	if (IS_ERR_OR_NULL(synx_data))
+		return NULL;
+
+	return synx_data->synx_obj;
+}
+
+/* global/local map functions */
+struct synx_map_entry *synx_util_insert_to_map(struct synx_coredata *synx_obj,
+			u32 h_synx, u32 flags);
+struct synx_map_entry *synx_util_get_map_entry(u32 h_synx);
+void synx_util_release_map_entry(struct synx_map_entry *map_entry);
+
+/* fence map functions */
+int synx_util_insert_fence_entry(struct synx_fence_entry *entry, u32 *h_synx,
+			u32 global);
+u32 synx_util_get_fence_entry(u64 key, u32 global);
+void synx_util_release_fence_entry(u64 key);
+
+/* coredata initialize functions */
+int synx_util_init_coredata(struct synx_coredata *synx_obj,
+			struct synx_create_params *params,
+			struct dma_fence_ops *ops,
+			u64 dma_context);
+int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
+			struct dma_fence **fences,
+			struct synx_merge_params *params,
+			u32 num_objs,
+			u64 dma_context);
+
+/* handle related functions */
+int synx_alloc_global_handle(u32 *new_synx);
+int synx_alloc_local_handle(u32 *new_synx);
+long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size);
+int synx_util_init_handle(struct synx_client *client, struct synx_coredata *obj,
+			u32 *new_h_synx,
+			void *map_entry);
+
+u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx);
+
+/* callback related functions */
+int synx_util_alloc_cb_entry(struct synx_client *client,
+			struct synx_kernel_payload *data,
+			u32 *cb_idx);
+int synx_util_clear_cb_entry(struct synx_client *client,
+			struct synx_client_cb *cb);
+void synx_util_default_user_callback(u32 h_synx, int status, void *data);
+void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 state);
+void synx_util_cb_dispatch(struct work_struct *cb_dispatch);
+
+/* external fence functions */
+int synx_util_activate(struct synx_coredata *synx_obj);
+int synx_util_add_callback(struct synx_coredata *synx_obj, u32 h_synx);
+
+/* merge related helper functions */
+s32 synx_util_merge_error(struct synx_client *client, u32 *h_synxs, u32 num_objs);
+int synx_util_validate_merge(struct synx_client *client, u32 *h_synxs, u32 num_objs,
+			struct dma_fence ***fences,
+			u32 *fence_cnt);
+
+/* coredata status functions */
+u32 synx_util_get_object_status(struct synx_coredata *synx_obj);
+u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj);
+
+/* client handle map related functions */
+struct synx_handle_coredata *synx_util_acquire_handle(struct synx_client *client,
+			u32 h_synx);
+void synx_util_release_handle(struct synx_handle_coredata *synx_data);
+int synx_util_update_handle(struct synx_client *client, u32 h_synx, u32 sync_id,
+			u32 type, struct synx_handle_coredata **handle);
+
+/* client memory handler functions */
+struct synx_client *synx_get_client(struct synx_session *session);
+void synx_put_client(struct synx_client *client);
+
+/* error log functions */
+void synx_util_generate_timestamp(char *timestamp, size_t size);
+void synx_util_log_error(u32 id, u32 h_synx, s32 err);
+
+/* external fence map functions */
+int synx_util_save_data(void *fence, u32 flags, u32 h_synx);
+struct synx_entry_64 *synx_util_retrieve_data(void *fence, u32 type);
+void synx_util_remove_data(void *fence, u32 type);
+
+/* misc */
+void synx_util_map_import_params_to_create(
+			struct synx_import_indv_params *params,
+			struct synx_create_params *c_params);
+
+struct bind_operations *synx_util_get_bind_ops(u32 type);
+u32 synx_util_map_client_id_to_core(enum synx_client_id id);
+
+#endif /* __SYNX_UTIL_H__ */
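
Putting the helpers above together, a typical status query from a client session acquires the per-client handle data, resolves the backing coredata, reads the fence state and drops the reference. A minimal sketch assuming only the declarations in this header (not driver code):

static u32 sketch_query_status(struct synx_client *client, u32 h_synx)
{
	struct synx_handle_coredata *synx_data;
	struct synx_coredata *synx_obj;
	u32 status;

	synx_data = synx_util_acquire_handle(client, h_synx);
	synx_obj = synx_util_obtain_object(synx_data);
	if (IS_ERR_OR_NULL(synx_obj))
		return SYNX_STATE_INVALID;

	status = synx_util_get_object_status(synx_obj);
	synx_util_release_handle(synx_data);	/* drop the reference taken above */
	return status;
}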

+ 19 - 0
synx_kernel_board.mk

@@ -0,0 +1,19 @@
+# Build synx kernel driver
+
+TARGET_SYNX_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
+		TARGET_SYNX_ENABLE := true
+	endif
+else
+TARGET_SYNX_ENABLE := true
+endif
+#
+ifeq ($(TARGET_SYNX_ENABLE), true)
+ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
+BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko
+endif
+endif
+

+ 12 - 0
synx_kernel_product.mk

@@ -0,0 +1,12 @@
+TARGET_SYNX_ENABLE := false
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
+	ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
+		TARGET_SYNX_ENABLE := true
+	endif
+else
+TARGET_SYNX_ENABLE := true
+endif
+
+ifeq ($(TARGET_SYNX_ENABLE), true)
+PRODUCT_PACKAGES += synx-driver.ko
+endif