msm: synx: build script as DLKM for Vendor SI

Added mk and Kbuild scripts to support building the synx driver as an external module for Vendor SI.

Change-Id: Ib66325d115ca46e6b61de1e168e85d09419f73e2
Signed-off-by: NITIN LAXMIDAS NAIK <quic_nitinlax@quicinc.com>
Author: NITIN LAXMIDAS NAIK
Date: 2022-07-13 12:14:48 -07:00
Parent: c9856ccfeb
Commit: 25cb61693a
24 changed files with 8525 additions and 0 deletions

Android.bp (new file, 5 lines)

@@ -0,0 +1,5 @@
cc_library_headers {
name: "qti_synx_kernel_headers",
export_include_dirs: ["include/uapi/synx/media"],
vendor_available: true
}
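This Soong module only exports the UAPI include path; a vendor component can consume these headers by listing qti_synx_kernel_headers in its header_libs.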

Android.mk (new file, 53 lines)

@@ -0,0 +1,53 @@
TARGET_SYNX_ENABLE := false
ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
TARGET_SYNX_ENABLE := true
endif
else
TARGET_SYNX_ENABLE := true
endif
ifeq ($(TARGET_SYNX_ENABLE),true)
SYNX_BLD_DIR := $(TOP)/vendor/qcom/opensource/synx-kernel
# Build synx-driver.ko
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := SYNX_ROOT=$(SYNX_BLD_DIR)
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := synx-driver-symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
# Include the built module in /vendor/lib/modules (vendor.img)
# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
$(info LOCAL_SRC_FILES = $(LOCAL_SRC_FILES))
LOCAL_MODULE := synx-driver.ko
LOCAL_MODULE_KBUILD_NAME := msm/synx-driver.ko
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
# print out variables
$(info KBUILD_OPTIONS = $(KBUILD_OPTIONS))
$(info LOCAL_ADDITIONAL_DEPENDENCY = $(LOCAL_ADDITIONAL_DEPENDENCY))
$(info LOCAL_ADDITIONAL_DEPENDENCIES = $(LOCAL_ADDITIONAL_DEPENDENCIES))
$(info LOCAL_REQUIRED_MODULES = $(LOCAL_REQUIRED_MODULES))
$(info DLKM_DIR = $(DLKM_DIR))
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif # End of check for TARGET_SYNX_ENABLE
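Note the guard logic above: boards that disable kernel DLKMs globally via TARGET_KERNEL_DLKM_DISABLE can still opt in to this driver by setting TARGET_KERNEL_DLKM_SYNX_OVERRIDE := true in their device configuration.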

Kbuild (new file, 6 lines)

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
CONFIG_BUILD_VENDORSI := true
# auto-detect subdirs
obj-y += msm/

Makefile (new file, 11 lines)

@@ -0,0 +1,11 @@
KBUILD_OPTIONS += SYNX_ROOT=$(KERNEL_SRC)/$(M)
all:
$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
modules_install:
$(MAKE) M=$(M) -C $(KERNEL_SRC) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions
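For reference, a minimal out-of-tree invocation of this Makefile would look like make KERNEL_SRC=/path/to/kernel/src M=$(pwd) (paths illustrative); the all target then recurses into the kernel build with SYNX_ROOT pointing back at this tree.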

(file name not shown; new file, 5 lines)

@@ -0,0 +1,5 @@
ifeq ($(CONFIG_QGKI),y)
export TARGET_SYNX_ENABLE=y
else
export TARGET_SYNX_ENABLE=m
endif

(file name not shown; new file, 6 lines)

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define TARGET_SYNX_ENABLE 1

(file name not shown; new file, 326 lines)

@@ -0,0 +1,326 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __UAPI_SYNX_H__
#define __UAPI_SYNX_H__
#include <linux/types.h>
#include <linux/ioctl.h>
/* Size of opaque payload sent to kernel for safekeeping until signal time */
#define SYNX_USER_PAYLOAD_SIZE 4
#define SYNX_MAX_WAITING_SYNX 16
#define SYNX_CALLBACK_RESULT_SUCCESS 2
#define SYNX_CALLBACK_RESULT_FAILED 3
#define SYNX_CALLBACK_RESULT_CANCELED 4
/**
* struct synx_info - Sync object creation information
*
* @name : Optional string representation of the synx object
* @synx_obj : Sync object returned after creation in kernel
*/
struct synx_info {
char name[64];
__s32 synx_obj;
};
/**
* struct synx_userpayload_info - Payload info from user space
*
* @synx_obj: Sync object for which payload has to be registered
* @reserved: Reserved
* @payload: Pointer to user payload
*/
struct synx_userpayload_info {
__s32 synx_obj;
__u32 reserved;
__u64 payload[SYNX_USER_PAYLOAD_SIZE];
};
/**
* struct synx_signal - Sync object signaling struct
*
* @synx_obj : Sync object to be signaled
* @synx_state : State of the synx object to which it should be signaled
*/
struct synx_signal {
__s32 synx_obj;
__u32 synx_state;
};
/**
* struct synx_merge - Merge information for synx objects
*
* @synx_objs : Pointer to synx object array to merge
* @num_objs : Number of objects in the array
* @merged : Merged synx object
*/
struct synx_merge {
__u64 synx_objs;
__u32 num_objs;
__s32 merged;
};
/**
* struct synx_wait - Sync object wait information
*
* @synx_obj : Sync object to wait on
* @reserved : Reserved
* @timeout_ms : Timeout in milliseconds
*/
struct synx_wait {
__s32 synx_obj;
__u32 reserved;
__u64 timeout_ms;
};
/**
* struct synx_external_desc - info of external sync object
*
* @type : Synx type
* @reserved : Reserved
* @id : Sync object id
*
*/
struct synx_external_desc {
__u32 type;
__u32 reserved;
__s32 id[2];
};
/**
* struct synx_bind - info for binding two synx objects
*
* @synx_obj : Synx object
* @reserved : Reserved
* @ext_sync_desc : External synx to bind to
*
*/
struct synx_bind {
__s32 synx_obj;
__u32 reserved;
struct synx_external_desc ext_sync_desc;
};
/**
* struct synx_addrefcount - info for refcount increment
*
* @synx_obj : Synx object
* @count : Count to increment
*
*/
struct synx_addrefcount {
__s32 synx_obj;
__u32 count;
};
/**
* struct synx_id_info - info for import and export of a synx object
*
* @synx_obj : Synx object to be exported
* @secure_key : Secure key created in export and used in import
* @new_synx_obj : Synx object created in import
*
*/
struct synx_id_info {
__s32 synx_obj;
__u32 secure_key;
__s32 new_synx_obj;
__u32 padding;
};
/**
* struct synx_fence_desc - info of external fence object
*
* @type : Fence type
* @reserved : Reserved
* @id : Fence object id
*
*/
struct synx_fence_desc {
__u32 type;
__u32 reserved;
__s32 id[2];
};
/**
* struct synx_create_v2 - Sync object creation information
*
* @name : Optional string representation of the synx object
* @synx_obj : Synx object allocated
* @flags : Create flags
* @desc : External fence desc
*/
struct synx_create_v2 {
char name[64];
__u32 synx_obj;
__u32 flags;
struct synx_fence_desc desc;
};
/**
* struct synx_userpayload_info_v2 - Payload info from user space
*
* @synx_obj : Sync object for which payload has to be registered
* @reserved : Reserved
* @payload : Pointer to user payload
*/
struct synx_userpayload_info_v2 {
__u32 synx_obj;
__u32 reserved;
__u64 payload[SYNX_USER_PAYLOAD_SIZE];
};
/**
* struct synx_signal_v2 - Sync object signaling struct
*
* @synx_obj : Sync object to be signaled
* @synx_state : State of the synx object to which it should be signaled
* @reserved : Reserved
*/
struct synx_signal_v2 {
__u32 synx_obj;
__u32 synx_state;
__u64 reserved;
};
/**
* struct synx_merge_v2 - Merge information for synx objects
*
* @synx_objs : Pointer to synx object array to merge
* @num_objs : Number of objects in the array
* @merged : Merged synx object
* @flags : Merge flags
* @reserved : Reserved
*/
struct synx_merge_v2 {
__u64 synx_objs;
__u32 num_objs;
__u32 merged;
__u32 flags;
__u32 reserved;
};
/**
* struct synx_wait_v2 - Sync object wait information
*
* @synx_obj : Sync object to wait on
* @reserved : Reserved
* @timeout_ms : Timeout in milliseconds
*/
struct synx_wait_v2 {
__u32 synx_obj;
__u32 reserved;
__u64 timeout_ms;
};
/**
* struct synx_external_desc_v2 - info of external sync object
*
* @type : Synx type
* @reserved : Reserved
* @id : Sync object id
*
*/
struct synx_external_desc_v2 {
__u64 id;
__u32 type;
__u32 reserved;
};
/**
* struct synx_bind_v2 - info for binding two synx objects
*
* @synx_obj : Synx object
* @reserved : Reserved
* @ext_sync_desc : External synx to bind to
*
*/
struct synx_bind_v2 {
__u32 synx_obj;
__u32 reserved;
struct synx_external_desc_v2 ext_sync_desc;
};
/**
* struct synx_import_info - import info
*
* @synx_obj : Synx handle to be imported
* @flags : Import flags
* @new_synx_obj : Synx object created in import
* @reserved : Reserved
* @desc : External fence descriptor
*/
struct synx_import_info {
__u32 synx_obj;
__u32 flags;
__u32 new_synx_obj;
__u32 reserved;
struct synx_fence_desc desc;
};
/**
* struct synx_import_arr_info - import list info
*
* @list : List of synx_import_info
* @num_objs : No of fences to import
*/
struct synx_import_arr_info {
__u64 list;
__u32 num_objs;
};
/**
* struct synx_fence_fd - get fd for synx fence
*
* @synx_obj : Synx handle
* @fd : fd for synx handle fence
*/
struct synx_fence_fd {
__u32 synx_obj;
__s32 fd;
};
/**
* struct synx_private_ioctl_arg - Sync driver ioctl argument
*
* @id : IOCTL command id
* @size : Size of command payload
* @result : Result of command execution
* @reserved : Reserved
* @ioctl_ptr : Pointer to user data
*/
struct synx_private_ioctl_arg {
__u32 id;
__u32 size;
__u32 result;
__u32 reserved;
__u64 ioctl_ptr;
};
#define SYNX_PRIVATE_MAGIC_NUM 's'
#define SYNX_PRIVATE_IOCTL_CMD \
_IOWR(SYNX_PRIVATE_MAGIC_NUM, 130, struct synx_private_ioctl_arg)
#define SYNX_CREATE 0
#define SYNX_RELEASE 1
#define SYNX_SIGNAL 2
#define SYNX_MERGE 3
#define SYNX_REGISTER_PAYLOAD 4
#define SYNX_DEREGISTER_PAYLOAD 5
#define SYNX_WAIT 6
#define SYNX_BIND 7
#define SYNX_ADDREFCOUNT 8
#define SYNX_GETSTATUS 9
#define SYNX_IMPORT 10
#define SYNX_EXPORT 11
#define SYNX_IMPORT_ARR 12
#define SYNX_GETFENCE_FD 13
#endif /* __UAPI_SYNX_H__ */
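To make the ioctl contract concrete, below is a minimal user-space sketch that issues SYNX_CREATE through the private ioctl. The device node path, the use of the v2 create struct, and the error handling are assumptions for illustration, not guarantees of this header:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "synx.h"	/* this UAPI header */

int main(void)
{
	struct synx_create_v2 create;
	struct synx_private_ioctl_arg arg;
	int fd = open("/dev/synx", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;
	memset(&create, 0, sizeof(create));
	strncpy(create.name, "example-fence", sizeof(create.name) - 1);
	memset(&arg, 0, sizeof(arg));
	arg.id = SYNX_CREATE;
	arg.size = sizeof(create);
	arg.ioctl_ptr = (uintptr_t)&create;	/* opaque user pointer */
	if (ioctl(fd, SYNX_PRIVATE_IOCTL_CMD, &arg) == 0)
		printf("created handle %u\n", create.synx_obj);
	close(fd);
	return 0;
}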

msm/Kbuild (new file, 37 lines)

@@ -0,0 +1,37 @@
LINUXINCLUDE += -I$(SYNX_ROOT)/include \
-I$(SYNX_ROOT)/include/uapi \
-I$(SYNX_ROOT)/include/uapi/synx/media
ccflags-y += -I$(SYNX_ROOT)/msm/synx/
# Add flag to compile the actual mmrm implementation instead of the stub version.
# To follow up with the mmrm team on whether techpack users need to define this long term.
#KBUILD_CPPFLAGS += -DCONFIG_MSM_MMRM
# ported from Android.mk
$(info within KBUILD file KBUILD_EXTRA_SYMBOLS = $(KBUILD_EXTRA_SYMBOLS))
ifeq ($(CONFIG_ARCH_WAIPIO), y)
$(info within KBUILD file CONFIG_ARCH_WAIPIO = $(CONFIG_ARCH_WAIPIO))
# include $(SYNX_ROOT)/config/waipio.mk
KBUILD_CPPFLAGS += -DCONFIG_SYNX_WAIPIO=1
ccflags-y += -DCONFIG_SYNX_WAIPIO=1
endif
ifeq ($(CONFIG_ARCH_KALAMA), y)
$(info within KBUILD file CONFIG_ARCH_KALAMA = $(CONFIG_ARCH_KALAMA))
# include $(SYNX_ROOT)/config/waipio.mk
KBUILD_CPPFLAGS += -DCONFIG_SYNX_KALAMA=1
ccflags-y += -DCONFIG_SYNX_KALAMA=1
endif
ifeq ($(CONFIG_ARCH_PINEAPPLE), y)
$(info within KBUILD file CONFIG_ARCH_PINEAPPLE = $(CONFIG_ARCH_PINEAPPLE))
# include $(SYNX_ROOT)/config/pineapple.mk
KBUILD_CPPFLAGS += -DCONFIG_SYNX_PINEAPPLE=1
ccflags-y += -DCONFIG_SYNX_PINEAPPLE=1
endif
obj-m += synx-driver.o
obj-m += synx/ipclite.o
synx-driver-objs := synx/synx.o synx/synx_global.o synx/synx_util.o synx/synx_debugfs.o
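The net effect is that each supported target (Waipio, Kalama, Pineapple) gets a matching CONFIG_SYNX_* define on both the preprocessor (KBUILD_CPPFLAGS) and C compiler (ccflags-y) paths, so driver sources can gate target-specific code at compile time.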

msm/Makefile (new file, 5 lines)

@@ -0,0 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-m += synx/ipclite.o
obj-m += synx-driver.o
synx-driver-objs := synx/synx.o synx/synx_util.o synx/synx_debugfs.o synx/synx_global.o

msm/synx/ipclite.c (new file, 1030 lines; diff suppressed because it is too large)

msm/synx/ipclite.h (new file, 321 lines)

@@ -0,0 +1,321 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/hwspinlock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/soc/qcom,ipcc.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include "ipclite_client.h"
#define IPCMEM_INIT_COMPLETED 0x1
#define ACTIVE_CHANNEL 0x1
#define IPCMEM_TOC_SIZE (4*1024)
#define MAX_CHANNEL_SIGNALS 4
#define MAX_PARTITION_COUNT 7 /*7 partitions other than global partition*/
#define IPCLITE_MSG_SIGNAL 0
#define IPCLITE_MEM_INIT_SIGNAL 1
#define IPCLITE_VERSION_SIGNAL 2
#define IPCLITE_TEST_SIGNAL 3
/** Flag definitions for the entries */
#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION (0x01)
#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION (0x02)
#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION \
(IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION | \
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION)
#define IPCMEM_TOC_ENTRY_FLAGS_IGNORE_PARTITION (0x00000004)
/*Hardcoded macro to identify local host on each core*/
#define LOCAL_HOST IPCMEM_APPS
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
/*IPCMEM Structure Definitions*/
struct ipclite_features {
uint32_t global_atomic_support;
uint32_t version_finalised;
};
struct ipcmem_partition_header {
uint32_t type; /*partition type*/
uint32_t desc_offset; /*descriptor offset*/
uint32_t desc_size; /*descriptor size*/
uint32_t fifo0_offset; /*fifo 0 offset*/
uint32_t fifo0_size; /*fifo 0 size*/
uint32_t fifo1_offset; /*fifo 1 offset*/
uint32_t fifo1_size; /*fifo 1 size*/
};
struct ipcmem_toc_entry {
uint32_t base_offset; /*partition offset from IPCMEM base*/
uint32_t size; /*partition size*/
uint32_t flags; /*partition flags if required*/
uint32_t host0; /*subsystem 0 who can access this partition*/
uint32_t host1; /*subsystem 1 who can access this partition*/
uint32_t status; /*partition active status*/
};
struct ipcmem_toc_header {
uint32_t size;
uint32_t init_done;
};
struct ipcmem_toc {
struct ipcmem_toc_header hdr;
struct ipcmem_toc_entry toc_entry_global;
struct ipcmem_toc_entry toc_entry[IPCMEM_NUM_HOSTS][IPCMEM_NUM_HOSTS];
/* Need to have a better implementation here */
/* as ipcmem is 4k and if host number increases */
/* it would create problems*/
struct ipclite_features ipclite_features;
uint32_t global_atomic_hwlock_owner;
};
struct ipcmem_region {
u64 aux_base;
void __iomem *virt_base;
uint32_t size;
};
struct ipcmem_partition {
struct ipcmem_partition_header hdr;
};
struct global_partition_header {
uint32_t partition_type;
uint32_t region_offset;
uint32_t region_size;
};
struct ipcmem_global_partition {
struct global_partition_header hdr;
};
struct ipclite_mem {
struct ipcmem_toc *toc;
struct ipcmem_region mem;
struct ipcmem_global_partition *global_partition;
struct ipcmem_partition *partition[MAX_PARTITION_COUNT];
};
struct ipclite_fifo {
uint32_t length;
__le32 *tail;
__le32 *head;
void *fifo;
size_t (*avail)(struct ipclite_fifo *fifo);
void (*peak)(struct ipclite_fifo *fifo,
void *data, size_t count);
void (*advance)(struct ipclite_fifo *fifo,
size_t count);
void (*write)(struct ipclite_fifo *fifo,
const void *data, size_t dlen);
void (*reset)(struct ipclite_fifo *fifo);
};
struct ipclite_hw_mutex_ops {
unsigned long flags;
void (*acquire)(void);
void (*release)(void);
};
struct ipclite_irq_info {
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
int irq;
int signal_id;
char irqname[32];
};
struct ipclite_client {
IPCLite_Client callback;
void *priv_data;
int reg_complete;
};
struct ipclite_channel {
uint32_t remote_pid;
struct ipclite_fifo *tx_fifo;
struct ipclite_fifo *rx_fifo;
spinlock_t tx_lock;
struct ipclite_irq_info irq_info[MAX_CHANNEL_SIGNALS];
struct ipclite_client client;
uint32_t channel_version;
uint32_t version_finalised;
uint32_t channel_status;
};
/*Single structure that defines everything about IPCLite*/
struct ipclite_info {
struct device *dev;
struct ipclite_channel channel[IPCMEM_NUM_HOSTS];
struct ipclite_mem ipcmem;
struct hwspinlock *hwlock;
struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
};
const struct ipcmem_toc_entry ipcmem_toc_global_partition_entry = {
/* Global partition. */
4 * 1024,
128 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_GLOBAL_HOST,
IPCMEM_GLOBAL_HOST,
};
const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
/* Global partition. */
/* {
* 4 * 1024,
* 128 * 1024,
* IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
* IPCMEM_GLOBAL_HOST,
* IPCMEM_GLOBAL_HOST,
* },
*/
/* Apps<->CDSP partition. */
{
132 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_CDSP,
1,
},
/* APPS<->CVP (EVA) partition. */
{
164 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_CVP,
1,
},
/* APPS<->VPU partition. */
{
196 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_VPU,
1,
},
/* CDSP<->CVP (EVA) partition. */
{
228 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CDSP,
IPCMEM_CVP,
1,
},
/* CDSP<->VPU partition. */
{
260 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_CDSP,
IPCMEM_VPU,
1,
},
/* VPU<->CVP (EVA) partition. */
{
292 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_VPU,
IPCMEM_CVP,
1,
},
/* APPS<->APPS partition. */
{
326 * 1024,
32 * 1024,
IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
IPCMEM_APPS,
IPCMEM_APPS,
1,
}
/* Last entry uses invalid hosts and no protections to signify the end. */
/* {
* 0,
* 0,
* IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
* IPCMEM_INVALID_HOST,
* IPCMEM_INVALID_HOST,
* }
*/
};
/* Default partition parameters */
#define DEFAULT_PARTITION_TYPE 0x0
#define DEFAULT_PARTITION_HDR_SIZE 1024
#define DEFAULT_DESCRIPTOR_OFFSET 1024
#define DEFAULT_DESCRIPTOR_SIZE (3*1024)
#define DEFAULT_FIFO0_OFFSET (4*1024)
#define DEFAULT_FIFO0_SIZE (8*1024)
#define DEFAULT_FIFO1_OFFSET (12*1024)
#define DEFAULT_FIFO1_SIZE (8*1024)
/*Loopback partition parameters*/
#define LOOPBACK_PARTITION_TYPE 0x1
/*Global partition parameters*/
#define GLOBAL_PARTITION_TYPE 0xFF
#define GLOBAL_PARTITION_HDR_SIZE (4*1024)
#define GLOBAL_REGION_OFFSET (4*1024)
#define GLOBAL_REGION_SIZE (124*1024)
const struct ipcmem_partition_header default_partition_hdr = {
DEFAULT_PARTITION_TYPE,
DEFAULT_DESCRIPTOR_OFFSET,
DEFAULT_DESCRIPTOR_SIZE,
DEFAULT_FIFO0_OFFSET,
DEFAULT_FIFO0_SIZE,
DEFAULT_FIFO1_OFFSET,
DEFAULT_FIFO1_SIZE,
};
/* TX and RX FIFO point to same location for such loopback partition type
* (FIFO0 offset = FIFO1 offset)
*/
const struct ipcmem_partition_header loopback_partition_hdr = {
LOOPBACK_PARTITION_TYPE,
DEFAULT_DESCRIPTOR_OFFSET,
DEFAULT_DESCRIPTOR_SIZE,
DEFAULT_FIFO0_OFFSET,
DEFAULT_FIFO0_SIZE,
DEFAULT_FIFO0_OFFSET,
DEFAULT_FIFO0_SIZE,
};
const struct global_partition_header global_partition_hdr = {
GLOBAL_PARTITION_TYPE,
GLOBAL_REGION_OFFSET,
GLOBAL_REGION_SIZE,
};

msm/synx/ipclite_client.h (new file, 191 lines)

@@ -0,0 +1,191 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __IPCLITE_CLIENT_H__
#define __IPCLITE_CLIENT_H__
typedef atomic_t ipclite_atomic_uint32_t;
typedef atomic_t ipclite_atomic_int32_t;
/**
* A list of hosts supported in IPCMEM
*/
enum ipcmem_host_type {
IPCMEM_APPS = 0, /**< Apps Processor */
IPCMEM_MODEM = 1, /**< Modem processor */
IPCMEM_LPASS = 2, /**< Audio processor */
IPCMEM_SLPI = 3, /**< Sensor processor */
IPCMEM_GPU = 4, /**< Graphics processor */
IPCMEM_CDSP = 5, /**< Compute DSP processor */
IPCMEM_CVP = 6, /**< Computer Vision processor */
IPCMEM_CAM = 7, /**< Camera processor */
IPCMEM_VPU = 8, /**< Video processor */
IPCMEM_NUM_HOSTS = 9, /**< Max number of host in target */
IPCMEM_GLOBAL_HOST = 0xFE, /**< Global Host */
IPCMEM_INVALID_HOST = 0xFF, /**< Invalid processor */
};
struct global_region_info {
void *virt_base;
uint32_t size;
};
typedef int32_t (*IPCLite_Client)(uint32_t proc_id, int64_t data, void *priv);
/**
* ipclite_msg_send() - Sends message to remote client.
*
* @proc_id : Identifier for remote client or subsystem.
* @data : 64 bit message value.
*
* @return Zero on success, negative on failure.
*/
int32_t ipclite_msg_send(int32_t proc_id, uint64_t data);
/**
* ipclite_register_client() - Registers client callback with framework.
*
* @cb_func_ptr : Client callback function to be called on message receive.
* @priv : Private data required by client for handling callback.
*
* @return Zero on successful registration, negative on failure.
*/
int32_t ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv);
/**
* ipclite_test_msg_send() - Sends message to remote client.
*
* @proc_id : Identifier for remote client or subsystem.
* @data : 64 bit message value.
*
* @return Zero on success, negative on failure.
*/
int32_t ipclite_test_msg_send(int32_t proc_id, uint64_t data);
/**
* ipclite_register_test_client() - Registers client callback with framework.
*
* @cb_func_ptr : Client callback function to be called on message receive.
* @priv : Private data required by client for handling callback.
*
* @return Zero on successful registration, negative on failure.
*/
int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
/**
* get_global_partition_info() - Gets info about IPCMEM's global partitions.
*
* @global_ipcmem : Pointer to global_region_info structure.
*
* @return Zero on success, negative on failure.
*/
int32_t get_global_partition_info(struct global_region_info *global_ipcmem);
/**
* ipclite_hwlock_reset() - Resets the lock if the lock is currently held by core_id
*
* @core_id : Core id whose held lock needs to be reset.
*
* @return None.
*/
void ipclite_hwlock_reset(enum ipcmem_host_type core_id);
/**
* ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
/**
* ipclite_atomic_init_i32() - Initializes the global memory with int32_t value.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data);
/**
* ipclite_global_atomic_store_u32() - Writes uint32_t value to global memory.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data);
/**
* ipclite_global_atomic_store_i32() - Writes int32_t value to global memory.
*
* @addr : Pointer to global memory
* @data : Value to store in global memory
*
* @return None.
*/
void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data);
/**
* ipclite_global_atomic_load_u32() - Reads the value from global memory.
*
* @addr : Pointer to global memory
*
* @return uint32_t value.
*/
uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr);
/**
* ipclite_global_atomic_load_i32() - Reads the value from global memory.
*
* @addr : Pointer to global memory
*
* @return int32_t value.
*/
int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr);
/**
* ipclite_global_test_and_set_bit() - Sets a bit in global memory.
*
* @nr : Bit position to set.
* @addr : Pointer to global memory
*
* @return previous value.
*/
uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
/**
* ipclite_global_test_and_clear_bit() - Clears a bit in global memory.
*
* @nr : Bit position to clear.
* @addr : Pointer to global memory
*
* @return previous value.
*/
uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr);
/**
* ipclite_global_atomic_inc() - Increments an atomic variable by one.
*
* @addr : Pointer to global memory
*
* @return previous value.
*/
int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr);
/**
* ipclite_global_atomic_dec() - Decrements an atomic variable by one.
*
* @addr : Pointer to global variable
*
* @return previous value.
*/
int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr);
#endif
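As an illustration of the client API above, a minimal kernel-side sketch is shown below; the callback body, message value, and destination host are placeholders:

#include <linux/printk.h>
#include "ipclite_client.h"

/* Invoked by the framework when a message arrives for this client. */
static int32_t example_cb(uint32_t proc_id, int64_t data, void *priv)
{
	pr_info("ipclite: message %lld from host %u\n", (long long)data, proc_id);
	return 0;
}

static int32_t example_ipclite_setup(void)
{
	int32_t rc = ipclite_register_client(example_cb, NULL);

	if (rc == 0)	/* send an arbitrary 64-bit payload to CDSP */
		rc = ipclite_msg_send(IPCMEM_CDSP, 0x1234);
	return rc;
}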

msm/synx/synx.c (new file, 2636 lines; diff suppressed because it is too large)

msm/synx/synx_api.h (new file, 542 lines)

@@ -0,0 +1,542 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_API_H__
#define __SYNX_API_H__
#include <linux/list.h>
#include <synx_header.h>
#include "synx_err.h"
/**
* enum synx_create_flags - Flags passed during synx_create call
*
* SYNX_CREATE_LOCAL_FENCE : Instructs the framework to create local synx object
* SYNX_CREATE_GLOBAL_FENCE : Instructs the framework to create global synx object
* SYNX_CREATE_DMA_FENCE : Create a synx object by wrapping the provided dma fence.
* Need to pass the dma_fence ptr through fence variable
* if this flag is set.
* SYNX_CREATE_CSL_FENCE : Create a synx object with provided csl fence.
* Establishes interop with the csl fence through
* bind operations.
*/
enum synx_create_flags {
SYNX_CREATE_LOCAL_FENCE = 0x01,
SYNX_CREATE_GLOBAL_FENCE = 0x02,
SYNX_CREATE_DMA_FENCE = 0x04,
SYNX_CREATE_CSL_FENCE = 0x08,
SYNX_CREATE_MAX_FLAGS = 0x10,
};
/**
* enum synx_init_flags - Session initialization flag
*/
enum synx_init_flags {
SYNX_INIT_MAX = 0x01,
};
/**
* enum synx_import_flags - Import flags
*
* SYNX_IMPORT_LOCAL_FENCE : Instructs the framework to create local synx object
* SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object
* SYNX_IMPORT_SYNX_FENCE : Import native Synx handle for synchronization
* Need to pass the Synx handle ptr through fence variable
* if this flag is set.
* SYNX_IMPORT_DMA_FENCE : Import dma fence and create Synx handle for interop
* Need to pass the dma_fence ptr through fence variable
* if this flag is set.
* SYNX_IMPORT_EX_RELEASE : Flag to inform relaxed invocation where release call
* need not be called by client on this handle after import.
*/
enum synx_import_flags {
SYNX_IMPORT_LOCAL_FENCE = 0x01,
SYNX_IMPORT_GLOBAL_FENCE = 0x02,
SYNX_IMPORT_SYNX_FENCE = 0x04,
SYNX_IMPORT_DMA_FENCE = 0x08,
SYNX_IMPORT_EX_RELEASE = 0x10,
};
/**
* enum synx_signal_status - Signal status
*
* SYNX_STATE_SIGNALED_SUCCESS : Signal success
* SYNX_STATE_SIGNALED_CANCEL : Signal cancellation
* SYNX_STATE_SIGNALED_MAX : Clients can send custom notification
* beyond the max value (only positive)
*/
enum synx_signal_status {
SYNX_STATE_SIGNALED_SUCCESS = 2,
SYNX_STATE_SIGNALED_CANCEL = 4,
SYNX_STATE_SIGNALED_MAX = 64,
};
/**
* synx_callback - Callback invoked by external fence
*
* The external fence dispatches the registered callback to notify
* the synx framework of the signal.
*/
typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
/**
* synx_user_callback - Callback function registered by clients
*
* User callback registered for non-blocking wait. Dispatched when
* synx object is signaled.
*/
typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data);
/**
* struct bind_operations - Function pointers that need to be defined
* to achieve bind functionality for external fence with synx obj
*
* @register_callback : Function to register with external sync object
* @deregister_callback : Function to deregister with external sync object
* @enable_signaling : Function to enable the signaling on the external
* sync object (optional)
* @signal : Function to signal the external sync object
*/
struct bind_operations {
int (*register_callback)(synx_callback cb_func,
void *userdata, s32 sync_obj);
int (*deregister_callback)(synx_callback cb_func,
void *userdata, s32 sync_obj);
int (*enable_signaling)(s32 sync_obj);
int (*signal)(s32 sync_obj, u32 status);
};
/**
* synx_bind_client_type : External fence supported for bind
*
* SYNX_TYPE_CSL : Camera CSL fence
*/
enum synx_bind_client_type {
SYNX_TYPE_CSL = 0,
SYNX_MAX_BIND_TYPES,
};
/**
* struct synx_register_params - External registration parameters
*
* @ops : Bind operations struct
* @name : External client name
* Only the first 64 bytes are accepted; the rest will be ignored
* @type : Synx bind client type
*/
struct synx_register_params {
struct bind_operations ops;
char *name;
enum synx_bind_client_type type;
};
/**
* struct synx_queue_desc - Memory descriptor of the queue allocated by
* the fence driver for each client during
* registration.
*
* @vaddr : CPU virtual address of the queue.
* @dev_addr : Physical address of the memory object.
* @size : Size of the memory.
* @mem_data : Internal pointer with the attributes of the allocation.
*/
struct synx_queue_desc {
void *vaddr;
u64 dev_addr;
u64 size;
void *mem_data;
};
/**
* enum synx_client_id : Unique identifier of the supported clients
*
* @SYNX_CLIENT_NATIVE : Native Client
* @SYNX_CLIENT_GFX_CTX0 : GFX Client 0
* @SYNX_CLIENT_DPU_CTL0 : DPU Client 0
* @SYNX_CLIENT_DPU_CTL1 : DPU Client 1
* @SYNX_CLIENT_DPU_CTL2 : DPU Client 2
* @SYNX_CLIENT_DPU_CTL3 : DPU Client 3
* @SYNX_CLIENT_DPU_CTL4 : DPU Client 4
* @SYNX_CLIENT_DPU_CTL5 : DPU Client 5
* @SYNX_CLIENT_EVA_CTX0 : EVA Client 0
* @SYNX_CLIENT_VID_CTX0 : Video Client 0
* @SYNX_CLIENT_NSP_CTX0 : NSP Client 0
* @SYNX_CLIENT_IFE_CTX0 : IFE Client 0
*/
enum synx_client_id {
SYNX_CLIENT_NATIVE = 0,
SYNX_CLIENT_GFX_CTX0,
SYNX_CLIENT_DPU_CTL0,
SYNX_CLIENT_DPU_CTL1,
SYNX_CLIENT_DPU_CTL2,
SYNX_CLIENT_DPU_CTL3,
SYNX_CLIENT_DPU_CTL4,
SYNX_CLIENT_DPU_CTL5,
SYNX_CLIENT_EVA_CTX0,
SYNX_CLIENT_VID_CTX0,
SYNX_CLIENT_NSP_CTX0,
SYNX_CLIENT_IFE_CTX0,
SYNX_CLIENT_MAX,
};
/**
* struct synx_session - Client session identifier
*
* @type : Session type
* @client : Pointer to client session
*/
struct synx_session {
u32 type;
void *client;
};
/**
* struct synx_initialization_params - Session params
*
* @name : Client session name
* Only the first 64 bytes are accepted; the rest will be ignored
* @ptr : Pointer to queue descriptor (filled by function)
* @id : Client identifier
* @flags : Synx initialization flags
*/
struct synx_initialization_params {
const char *name;
struct synx_queue_desc *ptr;
enum synx_client_id id;
enum synx_init_flags flags;
};
/**
* struct synx_create_params - Synx creation parameters
*
* @name : Optional parameter associating a name with the synx
* object for debug purposes
* Only the first 64 bytes are accepted;
* the rest will be ignored
* @h_synx : Pointer to synx object handle (filled by function)
* @fence : Pointer to external fence
* @flags : Synx flags for customization (mentioned below)
*
* SYNX_CREATE_GLOBAL_FENCE - Hints the framework to create global synx object
* If flag not set, hints framework to create a local synx object.
* SYNX_CREATE_DMA_FENCE - Wrap synx object with dma fence.
* Need to pass the dma_fence ptr through 'fence' variable if this flag is set.
* SYNX_CREATE_BIND_FENCE - Create a synx object with provided external fence.
* Establishes interop with supported external fence through bind operations.
* Need to fill synx_external_desc structure if this flag is set.
*/
struct synx_create_params {
const char *name;
u32 *h_synx;
void *fence;
enum synx_create_flags flags;
};
/**
* enum synx_merge_flags - Handle merge flags
*
* SYNX_MERGE_LOCAL_FENCE : Create local composite object.
* SYNX_MERGE_GLOBAL_FENCE : Create global composite object.
* SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects
* SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object
*/
enum synx_merge_flags {
SYNX_MERGE_LOCAL_FENCE = 0x01,
SYNX_MERGE_GLOBAL_FENCE = 0x02,
SYNX_MERGE_NOTIFY_ON_ALL = 0x04,
SYNX_MERGE_NOTIFY_ON_ANY = 0x08,
};
/**
* struct synx_merge_params - Synx merge parameters
*
* @h_synxs : Pointer to an array of synx handles to be merged
* @flags : Merge flags
* @num_objs : Number of synx objs in the block
* @h_merged_obj : Merged synx object handle (filled by function)
*/
struct synx_merge_params {
u32 *h_synxs;
enum synx_merge_flags flags;
u32 num_objs;
u32 *h_merged_obj;
};
/**
* enum synx_import_type - Import type
*
* SYNX_IMPORT_INDV_PARAMS : Import filled with synx_import_indv_params struct
* SYNX_IMPORT_ARR_PARAMS : Import filled with synx_import_arr_params struct
*/
enum synx_import_type {
SYNX_IMPORT_INDV_PARAMS = 0x01,
SYNX_IMPORT_ARR_PARAMS = 0x02,
};
/**
* struct synx_import_indv_params - Synx import indv parameters
*
* @new_h_synx : Pointer to new synx object
* (filled by the function)
* The new handle/s should be used by importing
* process for all synx api operations and
* for sharing with FW cores.
* @flags : Synx flags
* @fence : Pointer to external fence
*/
struct synx_import_indv_params {
u32 *new_h_synx;
enum synx_import_flags flags;
void *fence;
};
/**
* struct synx_import_arr_params - Synx import arr parameters
*
* @list : Array of synx_import_indv_params pointers
* @num_fences : No of fences passed to framework
*/
struct synx_import_arr_params {
struct synx_import_indv_params *list;
u32 num_fences;
};
/**
* struct synx_import_params - Synx import parameters
*
* @type : Import params type filled by client
* @indv : Params to import an individual handle/fence
* @arr : Params to import an array of handles/fences
*/
struct synx_import_params {
enum synx_import_type type;
union {
struct synx_import_indv_params indv;
struct synx_import_arr_params arr;
};
};
/**
* struct synx_callback_params - Synx callback parameters
*
* @h_synx : Synx object handle
* @cb_func : Pointer to callback func to be invoked
* @userdata : Opaque pointer passed back with callback
* @cancel_cb_func : Pointer to callback to ack cancellation (optional)
*/
struct synx_callback_params {
u32 h_synx;
synx_user_callback_t cb_func;
void *userdata;
synx_user_callback_t cancel_cb_func;
};
/* Kernel APIs */
/* synx_register_ops - Register operations for external synchronization
*
* Register with synx for enabling external synchronization through bind
*
* @param params : Pointer to register params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_INVALID will be returned if params are invalid.
* -SYNX_NOMEM will be returned if bind ops cannot be registered due to
* insufficient memory.
* -SYNX_ALREADY will be returned if type already in use.
*/
int synx_register_ops(const struct synx_register_params *params);
/**
* synx_deregister_ops - De-register external synchronization operations
*
* @param params : Pointer to register params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_INVALID will be returned if record not found.
*/
int synx_deregister_ops(const struct synx_register_params *params);
/**
* synx_initialize - Initializes a new client session
*
* @param params : Pointer to session init params
*
* @return Client session pointer on success. NULL or error in case of failure.
*/
struct synx_session *synx_initialize(struct synx_initialization_params *params);
/**
* synx_uninitialize - Destroys the client session
*
* @param session : Session ptr (returned from synx_initialize)
*
* @return Status of operation. SYNX_SUCCESS in case of success.
*/
int synx_uninitialize(struct synx_session *session);
/**
* synx_create - Creates a synx object
*
* Creates a new synx obj and returns the handle to client.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Pointer to create params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_INVALID will be returned if params were invalid.
* -SYNX_NOMEM will be returned if the kernel can't allocate space for
* synx object.
*/
int synx_create(struct synx_session *session, struct synx_create_params *params);
/**
* synx_async_wait - Registers a callback with a synx object
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Callback params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_INVALID will be returned if userdata is invalid.
* -SYNX_NOMEM will be returned if cb_func is invalid.
*/
int synx_async_wait(struct synx_session *session, struct synx_callback_params *params);
/**
* synx_cancel_async_wait - De-registers a callback with a synx object
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Callback params
*
* @return Status of operation. SYNX_SUCCESS in case of success.
* -SYNX_ALREADY if object has already been signaled, and cannot be cancelled.
* -SYNX_INVALID will be returned if userdata is invalid.
* -SYNX_NOMEM will be returned if cb_func is invalid.
*/
int synx_cancel_async_wait(struct synx_session *session,
struct synx_callback_params *params);
/**
* synx_signal - Signals a synx object with the status argument.
*
* This function will signal the synx object referenced by h_synx
* and invoke any external binding synx objs.
* The status parameter will indicate whether the entity
* performing the signaling wants to convey an error case or a success case.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle
* @param status : Status of signaling.
* Clients can send custom signaling status
* beyond SYNX_STATE_SIGNALED_MAX.
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_signal(struct synx_session *session, u32 h_synx,
enum synx_signal_status status);
/**
* synx_merge - Merges multiple synx objects
*
* This function will merge multiple synx objects into a synx group.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Merge params
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_merge(struct synx_session *session, struct synx_merge_params *params);
/**
* synx_wait - Waits for a synx object synchronously
*
* Does a wait on the synx object identified by h_synx for a maximum
* of timeout_ms milliseconds. Must not be called from interrupt context as
* this API can sleep.
* Will return status if handle was signaled. Status can be from pre-defined
* states (enum synx_signal_status) or custom status sent by producer.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle to be waited upon
* @param timeout_ms : Timeout in ms
*
* @return Signal status. -SYNX_INVALID if synx object is in bad state or arguments
* are invalid, -SYNX_TIMEOUT if wait times out.
*/
int synx_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms);
/**
* synx_get_status - Returns the status of the synx object
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle
*
* @return Status of the synx object.
*/
int synx_get_status(struct synx_session *session, u32 h_synx);
/**
* synx_import - Imports (looks up) synx object from given handle/fence
*
* Import subscribes the client session for notification on signal
* of handles/fences.
*
* @param session : Session ptr (returned from synx_initialize)
* @param params : Pointer to import params
*
* @return SYNX_SUCCESS upon success, -SYNX_INVALID if synx object is in a bad state
*/
int synx_import(struct synx_session *session, struct synx_import_params *params);
/**
* synx_get_fence - Get the native fence backing the synx object
*
* Function returns the native fence. Clients need to
* acquire & release additional reference explicitly.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle
*
* @return Fence pointer upon success, NULL or error in case of failure.
*/
void *synx_get_fence(struct synx_session *session, u32 h_synx);
/**
* synx_release - Release the synx object
*
* Decrements refcount of a synx object by 1, and destroys it
* if it becomes 0.
*
* @param session : Session ptr (returned from synx_initialize)
* @param h_synx : Synx object handle to be destroyed
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_release(struct synx_session *session, u32 h_synx);
/**
* synx_recover - Recover any possible handle leaks
*
* Function should be called on HW hang/reset to
* recover the Synx handles shared. This cleans up
* Synx handles held by the rest of the HW, and avoids
* potential resource leaks.
*
* Function does not destroy the session, but only
* recover synx handles belonging to the session.
* Synx session would still be active and clients
* need to destroy the session explicitly through
* synx_uninitialize API.
*
* @param id : Client ID of core to recover
*
* @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.
*/
int synx_recover(enum synx_client_id id);
#endif /* __SYNX_API_H__ */
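Tying the kernel APIs together, here is a minimal client lifecycle sketch; the client name, object name, and timeout are illustrative, and error handling is trimmed:

#include "synx_api.h"

static void synx_example(void)
{
	u32 h_synx;
	struct synx_queue_desc queue_desc = {0};
	struct synx_initialization_params init_params = {
		.name = "example-client",
		.ptr = &queue_desc,
		.id = SYNX_CLIENT_NATIVE,
	};
	struct synx_create_params create_params = {
		.name = "example-obj",
		.h_synx = &h_synx,
		.flags = SYNX_CREATE_LOCAL_FENCE,
	};
	struct synx_session *session = synx_initialize(&init_params);

	if (IS_ERR_OR_NULL(session))
		return;
	if (synx_create(session, &create_params) == SYNX_SUCCESS) {
		/* producer signals; a waiter would normally be another context */
		synx_signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);
		synx_wait(session, h_synx, 1000);
		synx_release(session, h_synx);
	}
	synx_uninitialize(session);
}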

msm/synx/synx_debugfs.c (new file, 145 lines)

@@ -0,0 +1,145 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "synx_api.h"
#include "synx_debugfs.h"
#include "synx_util.h"
#define MAX_DBG_BUF_SIZE (36 * SYNX_MAX_OBJS)
struct dentry *my_direc;
int synx_columns = NAME_COLUMN | ID_COLUMN |
STATE_COLUMN | GLOBAL_COLUMN;
EXPORT_SYMBOL(synx_columns);
int synx_debug = SYNX_ERR | SYNX_WARN |
SYNX_INFO | SYNX_DBG;
EXPORT_SYMBOL(synx_debug);
void populate_bound_rows(
struct synx_coredata *row, char *cur, char *end)
{
int j;
for (j = 0; j < row->num_bound_synxs; j++)
cur += scnprintf(cur, end - cur,
"\n\tID: %d",
row->bound_synxs[j].external_desc.id);
}
static ssize_t synx_table_read(struct file *file,
char *buf,
size_t count,
loff_t *ppos)
{
struct synx_device *dev = file->private_data;
struct error_node *err_node, *err_node_tmp;
char *dbuf, *cur, *end;
int rc = SYNX_SUCCESS;
ssize_t len = 0;
dbuf = kzalloc(MAX_DBG_BUF_SIZE, GFP_KERNEL);
if (!dbuf)
return -ENOMEM;
/* dump client details */
cur = dbuf;
end = cur + MAX_DBG_BUF_SIZE;
if (synx_columns & NAME_COLUMN)
cur += scnprintf(cur, end - cur, "| Name |");
if (synx_columns & ID_COLUMN)
cur += scnprintf(cur, end - cur, "| ID |");
if (synx_columns & STATE_COLUMN)
cur += scnprintf(cur, end - cur, "| Status |");
if (synx_columns & FENCE_COLUMN)
cur += scnprintf(cur, end - cur, "| Fence |");
if (synx_columns & COREDATA_COLUMN)
cur += scnprintf(cur, end - cur, "| Coredata |");
if (synx_columns & GLOBAL_COLUMN)
cur += scnprintf(cur, end - cur, "| Coredata |");
if (synx_columns & BOUND_COLUMN)
cur += scnprintf(cur, end - cur, "| Bound |");
cur += scnprintf(cur, end - cur, "\n");
rc = synx_global_dump_shared_memory();
if (rc) {
cur += scnprintf(cur, end - cur,
"Err %d: Failed to dump global shared mem\n", rc);
}
if (synx_columns & ERROR_CODES && !list_empty(
&dev->error_list)) {
cur += scnprintf(
cur, end - cur, "\nError(s): ");
mutex_lock(&dev->error_lock);
list_for_each_entry_safe(
err_node, err_node_tmp,
&dev->error_list, node) {
cur += scnprintf(cur, end - cur,
"\n\tTime: %s - ID: %d - Code: %d",
err_node->timestamp,
err_node->h_synx,
err_node->error_code);
list_del(&err_node->node);
kfree(err_node);
}
mutex_unlock(&dev->error_lock);
}
len = simple_read_from_buffer(buf, count, ppos,
dbuf, cur - dbuf);
kfree(dbuf);
return len;
}
static ssize_t synx_table_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
return 0;
}
static const struct file_operations synx_table_fops = {
.owner = THIS_MODULE,
.read = synx_table_read,
.write = synx_table_write,
.open = simple_open,
};
struct dentry *synx_init_debugfs_dir(struct synx_device *dev)
{
struct dentry *dir = NULL;
dir = debugfs_create_dir("synx_debug", NULL);
if (!dir) {
dprintk(SYNX_ERR, "Failed to create debugfs for synx\n");
return NULL;
}
debugfs_create_u32("debug_level", 0644, dir, &synx_debug);
debugfs_create_u32("column_level", 0644, dir, &synx_columns);
if (!debugfs_create_file("synx_table",
0644, dir, dev, &synx_table_fops)) {
dprintk(SYNX_ERR, "Failed to create debugfs file for synx\n");
return NULL;
}
return dir;
}
void synx_remove_debugfs_dir(struct synx_device *dev)
{
debugfs_remove_recursive(dev->debugfs_root);
}
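With debugfs mounted at the usual location, the table is then readable at /sys/kernel/debug/synx_debug/synx_table, while debug_level and column_level tune the log and column masks.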

msm/synx/synx_debugfs.h (new file, 94 lines)

@@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_DEBUGFS_H__
#define __SYNX_DEBUGFS_H__
#include <linux/debugfs.h>
#include <linux/delay.h>
#include "synx_private.h"
enum synx_debug_level {
SYNX_ERR = 0x0001,
SYNX_WARN = 0x0002,
SYNX_INFO = 0x0004,
SYNX_DBG = 0x0008,
SYNX_VERB = 0x0010,
SYNX_IPCL = 0x0020,
SYNX_GSM = 0x0040,
SYNX_MEM = 0x0080,
SYNX_ALL = SYNX_ERR | SYNX_WARN | SYNX_INFO |
SYNX_DBG | SYNX_IPCL | SYNX_GSM | SYNX_MEM,
};
enum synx_columns_level {
NAME_COLUMN = 0x0001,
ID_COLUMN = 0x0002,
BOUND_COLUMN = 0x0004,
STATE_COLUMN = 0x0008,
FENCE_COLUMN = 0x0010,
COREDATA_COLUMN = 0x0020,
GLOBAL_COLUMN = 0x0040,
ERROR_CODES = 0x8000,
};
#ifndef SYNX_DBG_LABEL
#define SYNX_DBG_LABEL "synx"
#endif
#define SYNX_DBG_TAG SYNX_DBG_LABEL ": %4s: "
extern int synx_debug;
static inline char *synx_debug_str(int level)
{
switch (level) {
case SYNX_ERR:
return "err";
case SYNX_WARN:
return "warn";
case SYNX_INFO:
return "info";
case SYNX_DBG:
return "dbg";
case SYNX_VERB:
return "verb";
case SYNX_IPCL:
return "ipcl";
case SYNX_GSM:
return "gmem";
case SYNX_MEM:
return "mem";
default:
return "???";
}
}
#define dprintk(__level, __fmt, arg...) \
do { \
if (synx_debug & __level) { \
pr_info(SYNX_DBG_TAG "%s: %d: " __fmt, \
synx_debug_str(__level), __func__, \
__LINE__, ## arg); \
} \
} while (0)
/**
* synx_init_debugfs_dir - Initializes debugfs
*
* @param dev : Pointer to synx device structure
*/
struct dentry *synx_init_debugfs_dir(struct synx_device *dev);
/**
* synx_remove_debugfs_dir - Removes debugfs
*
* @param dev : Pointer to synx device structure
*/
void synx_remove_debugfs_dir(struct synx_device *dev);
#endif /* __SYNX_DEBUGFS_H__ */
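For example, a driver-side trace at debug level would be written as below (h_synx is a hypothetical local):

	dprintk(SYNX_DBG, "created handle %u\n", h_synx);

and is emitted only when SYNX_DBG is set in the synx_debug mask.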

msm/synx/synx_err.h (new file, 27 lines)

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_ERR_H__
#define __SYNX_ERR_H__
#include <linux/err.h>
/**
* Error codes returned from framework
*
* Return codes are mapped to platform specific
* return values.
*/
#define SYNX_SUCCESS 0
#define SYNX_NOMEM ENOMEM
#define SYNX_NOSUPPORT EOPNOTSUPP
#define SYNX_NOPERM EPERM
#define SYNX_TIMEOUT ETIMEDOUT
#define SYNX_ALREADY EALREADY
#define SYNX_NOENT ENOENT
#define SYNX_INVALID EINVAL
#define SYNX_BUSY EBUSY
#endif /* __SYNX_ERR_H__ */
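Call sites across the driver return these codes negated, mirroring kernel errno conventions; for example, from synx_global.c:

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;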

msm/synx/synx_global.c (new file, 819 lines)

@@ -0,0 +1,819 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/hwspinlock.h>
#include <linux/string.h>
#include "synx_debugfs.h"
#include "synx_global.h"
static struct synx_shared_mem synx_gmem;
static struct hwspinlock *synx_hwlock;
static u32 synx_gmem_lock_owner(u32 idx)
{
/*
* subscribers field of global table index 0 is used to
* maintain synx gmem lock owner data.
* core updates the field after acquiring the lock and
* before releasing the lock appropriately.
*/
return synx_gmem.table[0].subscribers;
}
static void synx_gmem_lock_owner_set(u32 idx)
{
synx_gmem.table[0].subscribers = SYNX_CORE_APSS;
}
static void synx_gmem_lock_owner_clear(u32 idx)
{
if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS)
dprintk(SYNX_WARN, "reset lock owned by core %u\n",
synx_gmem.table[0].subscribers);
synx_gmem.table[0].subscribers = SYNX_CORE_MAX;
}
static int synx_gmem_lock(u32 idx, unsigned long *flags)
{
int rc;
if (!synx_hwlock)
return -SYNX_INVALID;
rc = hwspin_lock_timeout_irqsave(
synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags);
if (!rc)
synx_gmem_lock_owner_set(idx);
return rc;
}
static void synx_gmem_unlock(u32 idx, unsigned long *flags)
{
synx_gmem_lock_owner_clear(idx);
hwspin_unlock_irqrestore(synx_hwlock, flags);
}
static void synx_global_print_data(
struct synx_global_coredata *synx_g_obj,
const char *func)
{
int i = 0;
dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u",
func, synx_g_obj->status,
synx_g_obj->handle, synx_g_obj->refcount);
dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u",
func, synx_g_obj->subscribers, synx_g_obj->waiters,
synx_g_obj->num_child);
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
if (synx_g_obj->parents[i])
dprintk(SYNX_VERB, "%s: parents %u:%u",
func, i, synx_g_obj->parents[i]);
}
int synx_global_dump_shared_memory(void)
{
int rc = SYNX_SUCCESS, idx;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_INVALID;
/* Print bitmap memory*/
for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) {
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d",
__func__, idx, synx_gmem.bitmap[idx]);
synx_gmem_unlock(idx, &flags);
}
/* Print table memory*/
for (idx = 0;
idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT;
idx++) {
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx);
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
synx_gmem_unlock(idx, &flags);
}
return rc;
}
static int synx_gmem_init(void)
{
if (!synx_gmem.table)
return -SYNX_NOMEM;
synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID);
if (!synx_hwlock) {
dprintk(SYNX_ERR, "hwspinlock request failed\n");
return -SYNX_NOMEM;
}
/* zero idx not allocated for clients */
ipclite_global_test_and_set_bit(0,
(ipclite_atomic_uint32_t *)synx_gmem.bitmap);
memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata));
return SYNX_SUCCESS;
}
u32 synx_global_map_core_id(enum synx_core_id id)
{
u32 host_id;
switch (id) {
case SYNX_CORE_APSS:
host_id = IPCMEM_APPS; break;
case SYNX_CORE_NSP:
host_id = IPCMEM_CDSP; break;
case SYNX_CORE_IRIS:
host_id = IPCMEM_VPU; break;
case SYNX_CORE_EVA:
host_id = IPCMEM_CVP; break;
default:
host_id = IPCMEM_NUM_HOSTS;
dprintk(SYNX_ERR, "invalid core id\n");
}
return host_id;
}
int synx_global_alloc_index(u32 *idx)
{
int rc = SYNX_SUCCESS;
u32 prev, index;
const u32 size = SYNX_GLOBAL_MAX_OBJS;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (IS_ERR_OR_NULL(idx))
return -SYNX_INVALID;
do {
index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size);
if (index >= size) {
rc = -SYNX_NOMEM;
break;
}
prev = ipclite_global_test_and_set_bit(index % 32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index/32));
if ((prev & (1UL << (index % 32))) == 0) {
*idx = index;
dprintk(SYNX_MEM, "allocated global idx %u\n", *idx);
break;
}
} while (true);
return rc;
}
int synx_global_init_coredata(u32 h_synx)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
memset(synx_g_obj, 0, sizeof(*synx_g_obj));
/* set status to active */
synx_g_obj->status = SYNX_STATE_ACTIVE;
synx_g_obj->refcount = 1;
synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS);
synx_g_obj->handle = h_synx;
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
static int synx_global_get_waiting_cores_locked(
struct synx_global_coredata *synx_g_obj,
bool *cores)
{
int i;
synx_global_print_data(synx_g_obj, __func__);
for (i = 0; i < SYNX_CORE_MAX; i++) {
if (synx_g_obj->waiters & (1UL << i)) {
cores[i] = true;
dprintk(SYNX_VERB,
"waiting for handle %u/n",
synx_g_obj->handle);
}
}
/* clear waiter list so signals are not repeated */
synx_g_obj->waiters = 0;
return SYNX_SUCCESS;
}
int synx_global_get_waiting_cores(u32 idx, bool *cores)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_global_get_waiting_cores_locked(synx_g_obj, cores);
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_set_waiting_core(u32 idx, enum synx_core_id id)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_g_obj->waiters |= (1UL << id);
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_get_subscribed_cores(u32 idx, bool *cores)
{
int i;
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
for (i = 0; i < SYNX_CORE_MAX; i++)
if (synx_g_obj->subscribers & (1UL << i))
cores[i] = true;
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_g_obj->subscribers |= (1UL << id);
synx_gmem_unlock(idx, &flags);
return SYNX_SUCCESS;
}
u32 synx_global_get_parents_num(u32 idx)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
u32 i, count = 0;
if (!synx_gmem.table)
return 0;
if (!synx_is_valid_idx(idx))
return 0;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (synx_g_obj->parents[i] != 0)
count++;
}
synx_gmem_unlock(idx, &flags);
return count;
}
static int synx_global_get_parents_locked(
struct synx_global_coredata *synx_g_obj, u32 *parents)
{
u32 i;
if (!synx_g_obj || !parents)
return -SYNX_NOMEM;
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
parents[i] = synx_g_obj->parents[i];
return SYNX_SUCCESS;
}
int synx_global_get_parents(u32 idx, u32 *parents)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table || !parents)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
rc = synx_global_get_parents_locked(synx_g_obj, parents);
synx_gmem_unlock(idx, &flags);
return rc;
}
u32 synx_global_get_status(u32 idx)
{
int rc;
unsigned long flags;
u32 status;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return 0;
if (!synx_is_valid_idx(idx))
return 0;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
status = synx_g_obj->status;
synx_gmem_unlock(idx, &flags);
return status;
}
u32 synx_global_test_status_set_wait(u32 idx,
enum synx_core_id id)
{
int rc;
unsigned long flags;
u32 status;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return 0;
if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
return 0;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return 0;
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
status = synx_g_obj->status;
/* if handle is still ACTIVE */
if (status == SYNX_STATE_ACTIVE)
synx_g_obj->waiters |= (1UL << id);
else
dprintk(SYNX_DBG, "handle %u already signaled %u",
synx_g_obj->handle, synx_g_obj->status);
synx_gmem_unlock(idx, &flags);
return status;
}
static int synx_global_update_status_core(u32 idx,
u32 status)
{
u32 i, p_idx;
int rc;
bool clear = false;
unsigned long flags;
uint64_t data;
struct synx_global_coredata *synx_g_obj;
u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0};
bool wait_cores[SYNX_CORE_MAX] = {false};
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
/* prepare for cross core signaling */
data = synx_g_obj->handle;
data <<= 32;
if (synx_g_obj->num_child != 0) {
/* composite handle */
synx_g_obj->num_child--;
if (synx_g_obj->num_child == 0) {
if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
synx_g_obj->status =
(status == SYNX_STATE_SIGNALED_SUCCESS) ?
SYNX_STATE_SIGNALED_SUCCESS : SYNX_STATE_SIGNALED_ERROR;
data |= synx_g_obj->status;
synx_global_get_waiting_cores_locked(synx_g_obj,
wait_cores);
synx_global_get_parents_locked(synx_g_obj, h_parents);
} else {
data = 0;
dprintk(SYNX_WARN,
"merged handle %u already in state %u\n",
synx_g_obj->handle, synx_g_obj->status);
}
/* release ref held by constituting handles */
synx_g_obj->refcount--;
if (synx_g_obj->refcount == 0) {
memset(synx_g_obj, 0,
sizeof(*synx_g_obj));
clear = true;
}
} else if (status != SYNX_STATE_SIGNALED_SUCCESS) {
synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
data |= synx_g_obj->status;
synx_global_get_waiting_cores_locked(synx_g_obj,
wait_cores);
synx_global_get_parents_locked(synx_g_obj, h_parents);
dprintk(SYNX_WARN,
"merged handle %u signaled with error state\n",
synx_g_obj->handle);
} else {
/* pending notification from handles */
data = 0;
dprintk(SYNX_DBG,
"Child notified parent handle %u, pending %u\n",
synx_g_obj->handle, synx_g_obj->num_child);
}
} else {
synx_g_obj->status = status;
data |= synx_g_obj->status;
synx_global_get_waiting_cores_locked(synx_g_obj,
wait_cores);
synx_global_get_parents_locked(synx_g_obj, h_parents);
}
synx_gmem_unlock(idx, &flags);
if (clear) {
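/* release the allocation bit: bit idx%32 within bitmap word idx/32 */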
ipclite_global_test_and_clear_bit(idx%32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
dprintk(SYNX_MEM,
"cleared global idx %u\n", idx);
}
/* notify waiting clients on signal */
if (data) {
/* notify wait client */
for (i = 1; i < SYNX_CORE_MAX; i++) {
if (!wait_cores[i])
continue;
/* synx_g_obj may have been cleared above; log from the data payload */
dprintk(SYNX_DBG,
"invoking ipc signal handle %u, status %u\n",
(u32)(data >> 32), (u32)data);
if (ipclite_msg_send(
synx_global_map_core_id(i),
data))
dprintk(SYNX_ERR,
"ipc signaling %llu to core %u failed\n",
data, i);
}
}
/* handle parent notifications */
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
p_idx = h_parents[i];
if (p_idx == 0)
continue;
synx_global_update_status_core(p_idx, status);
}
return SYNX_SUCCESS;
}
int synx_global_update_status(u32 idx, u32 status)
{
int rc = -SYNX_INVALID;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE)
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
if (synx_g_obj->num_child != 0) {
/* composite handle cannot be signaled directly;
 * rc still holds SYNX_SUCCESS from the lock call above
 */
rc = -SYNX_INVALID;
goto fail;
} else if (synx_g_obj->status != SYNX_STATE_ACTIVE) {
rc = -SYNX_ALREADY;
goto fail;
}
synx_gmem_unlock(idx, &flags);
return synx_global_update_status_core(idx, status);
fail:
synx_gmem_unlock(idx, &flags);
return rc;
}
int synx_global_get_ref(u32 idx)
{
int rc;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(idx))
return -SYNX_INVALID;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return rc;
synx_g_obj = &synx_gmem.table[idx];
synx_global_print_data(synx_g_obj, __func__);
if (synx_g_obj->handle && synx_g_obj->refcount)
synx_g_obj->refcount++;
else
rc = -SYNX_NOENT;
synx_gmem_unlock(idx, &flags);
return rc;
}
void synx_global_put_ref(u32 idx)
{
int rc;
bool clear = false;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
if (!synx_gmem.table)
return;
if (!synx_is_valid_idx(idx))
return;
rc = synx_gmem_lock(idx, &flags);
if (rc)
return;
synx_g_obj = &synx_gmem.table[idx];
synx_g_obj->refcount--;
if (synx_g_obj->refcount == 0) {
memset(synx_g_obj, 0, sizeof(*synx_g_obj));
clear = true;
}
synx_gmem_unlock(idx, &flags);
if (clear) {
ipclite_global_test_and_clear_bit(idx%32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
dprintk(SYNX_MEM, "cleared global idx %u\n", idx);
}
}
int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
{
int rc = -SYNX_INVALID;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
u32 i, j = 0;
u32 idx;
bool sig_error = false;
u32 num_child = 0;
if (!synx_gmem.table)
return -SYNX_NOMEM;
if (!synx_is_valid_idx(p_idx))
return -SYNX_INVALID;
while (j < num_list) {
idx = idx_list[j];
if (!synx_is_valid_idx(idx))
goto fail;
rc = synx_gmem_lock(idx, &flags);
if (rc)
goto fail;
synx_g_obj = &synx_gmem.table[idx];
if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (synx_g_obj->parents[i] == 0) {
synx_g_obj->parents[i] = p_idx;
break;
}
}
/* check for a free parent slot while 'i' is still valid */
if (i >= SYNX_GLOBAL_MAX_PARENTS) {
synx_gmem_unlock(idx, &flags);
rc = -SYNX_NOMEM;
goto fail;
}
num_child++;
} else if (synx_g_obj->status >
SYNX_STATE_SIGNALED_SUCCESS) {
sig_error = true;
}
synx_gmem_unlock(idx, &flags);
j++;
}
rc = synx_gmem_lock(p_idx, &flags);
if (rc)
goto fail;
synx_g_obj = &synx_gmem.table[p_idx];
synx_g_obj->num_child += num_child;
if (sig_error)
synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
else if (synx_g_obj->num_child != 0)
synx_g_obj->refcount++;
else if (synx_g_obj->num_child == 0 &&
synx_g_obj->status == SYNX_STATE_ACTIVE)
synx_g_obj->status = SYNX_STATE_SIGNALED_SUCCESS;
synx_global_print_data(synx_g_obj, __func__);
synx_gmem_unlock(p_idx, &flags);
return SYNX_SUCCESS;
fail:
/* roll back every entry processed so far (parent link, if any) */
while (j--) {
idx = idx_list[j];
if (synx_gmem_lock(idx, &flags))
continue;
synx_g_obj = &synx_gmem.table[idx];
for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
if (synx_g_obj->parents[i] == p_idx) {
synx_g_obj->parents[i] = 0;
break;
}
}
synx_gmem_unlock(idx, &flags);
}
return rc;
}
int synx_global_recover(enum synx_core_id core_id)
{
int rc = SYNX_SUCCESS;
u32 idx = 0;
const u32 size = SYNX_GLOBAL_MAX_OBJS;
unsigned long flags;
struct synx_global_coredata *synx_g_obj;
bool update;
int *clear_idx = NULL;
if (!synx_gmem.table)
return -SYNX_NOMEM;
clear_idx = kcalloc(SYNX_GLOBAL_MAX_OBJS, sizeof(int), GFP_KERNEL);
if (!clear_idx)
return -SYNX_NOMEM;
ipclite_hwlock_reset(synx_global_map_core_id(core_id));
/* recover synx gmem lock if it was owned by core in ssr */
if (synx_gmem_lock_owner(0) == core_id) {
synx_gmem_lock_owner_clear(0);
hwspin_unlock_raw(synx_hwlock);
}
idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
size, idx + 1);
while (idx < size) {
update = false;
rc = synx_gmem_lock(idx, &flags);
if (rc)
goto free;
synx_g_obj = &synx_gmem.table[idx];
if (synx_g_obj->refcount &&
synx_g_obj->subscribers & (1UL << core_id)) {
synx_g_obj->subscribers &= ~(1UL << core_id);
synx_g_obj->refcount--;
if (synx_g_obj->refcount == 0) {
memset(synx_g_obj, 0, sizeof(*synx_g_obj));
clear_idx[idx] = 1;
} else if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
update = true;
}
}
synx_gmem_unlock(idx, &flags);
if (update)
synx_global_update_status(idx,
SYNX_STATE_SIGNALED_SSR);
idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
size, idx + 1);
}
for (idx = 1; idx < size; idx++) {
if (clear_idx[idx]) {
ipclite_global_test_and_clear_bit(idx % 32,
(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
dprintk(SYNX_MEM, "released global idx %u\n", idx);
}
}
free:
kfree(clear_idx);
return rc;
}
int synx_global_mem_init(void)
{
int rc;
int bitmap_size = SYNX_GLOBAL_MAX_OBJS/32;
struct global_region_info mem_info;
rc = get_global_partition_info(&mem_info);
if (rc) {
dprintk(SYNX_ERR, "error setting up global shared memory\n");
return rc;
}
memset(mem_info.virt_base, 0, mem_info.size);
dprintk(SYNX_DBG, "global shared memory %pK size %u\n",
mem_info.virt_base, mem_info.size);
synx_gmem.bitmap = (u32 *)mem_info.virt_base;
synx_gmem.locks = synx_gmem.bitmap + bitmap_size;
synx_gmem.table =
(struct synx_global_coredata *)(synx_gmem.locks + 2);
dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n",
synx_gmem.bitmap, synx_gmem.table);
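/*
 * Resulting layout of the shared region, in 32-bit words (derived from
 * the pointer arithmetic above):
 *   [0, bitmap_size)        allocation bitmap
 *   [bitmap_size, +2)       global lock words
 *   [bitmap_size + 2, ...)  synx_global_coredata table
 */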
return synx_gmem_init();
}

284
msm/synx/synx_global.h Normal file

@@ -0,0 +1,284 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_SHARED_MEM_H__
#define __SYNX_SHARED_MEM_H__
#include "synx_err.h"
#include "ipclite_client.h"
#include <synx_header.h>
/**
* enum synx_core_id - Synx core IDs
*
* SYNX_CORE_APSS : APSS core
* SYNX_CORE_NSP : NSP core
* SYNX_CORE_EVA : EVA core
* SYNX_CORE_IRIS : IRIS core
*/
enum synx_core_id {
SYNX_CORE_APSS = 0,
SYNX_CORE_NSP,
SYNX_CORE_EVA,
SYNX_CORE_IRIS,
SYNX_CORE_MAX,
};
/* synx handle encoding */
#define SYNX_HANDLE_INDEX_BITS 16
#define SYNX_HANDLE_CORE_BITS 4
#define SYNX_HANDLE_GLOBAL_FLAG_BIT 1
#define SYNX_GLOBAL_SHARED_LOCKS 1
#define SYNX_GLOBAL_MAX_OBJS 4096
#define SYNX_GLOBAL_MAX_PARENTS 4
#define SYNX_HANDLE_INDEX_MASK ((1UL<<SYNX_HANDLE_INDEX_BITS)-1)
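/*
 * Resulting handle bit layout (implied by the widths above):
 *   bits [15:0]  : global table index
 *   bits [19:16] : core id
 *   bit  20      : global flag
 */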
#define SHRD_MEM_DUMP_NUM_BMAP_WORDS 10
#define NUM_CHAR_BIT 8
/* spin lock timeout (ms) */
#define SYNX_HWSPIN_TIMEOUT 500
#define SYNX_HWSPIN_ID 10
/* internal signal states */
#define SYNX_STATE_INVALID 0
#define SYNX_STATE_ACTIVE 1
#define SYNX_STATE_SIGNALED_ERROR 3
#define SYNX_STATE_SIGNALED_EXTERNAL 5
#define SYNX_STATE_SIGNALED_SSR 6
/**
* struct synx_global_coredata - Synx global object, used for bookkeeping
* of all metadata associated with each individual global entry
*
* @status : Synx signaling status
* @handle : Handle of global entry
* @refcount : References owned by each core
* @num_child : Count of children pending signal (for composite handle)
* @subscribers : Cores owning reference on this object
* @waiters : Cores waiting for notification
* @parents : Global entry indices of the parent composite handles.
* An entry can be a constituent of up to SYNX_GLOBAL_MAX_PARENTS composites.
*/
struct synx_global_coredata {
u32 status;
u32 handle;
u16 refcount;
u16 num_child;
u16 subscribers;
u16 waiters;
u16 parents[SYNX_GLOBAL_MAX_PARENTS];
};
/**
* struct synx_shared_mem - Synx global shared memory descriptor
*
* @bitmap : Bitmap for allocating entries from the table
* @locks : Array of locks for exclusive access to table entries
* @table : Array of Synx global entries
*/
struct synx_shared_mem {
u32 *bitmap;
u32 *locks;
struct synx_global_coredata *table;
};
static inline bool synx_is_valid_idx(u32 idx)
{
if (idx < SYNX_GLOBAL_MAX_OBJS)
return true;
return false;
}
/**
* synx_global_mem_init - Initialize global shared memory
*
* @return Zero on success, negative error on failure.
*/
int synx_global_mem_init(void);
/**
* synx_global_map_core_id - Map Synx core ID to IPC Lite host
*
* @param id : Core ID to map
*
* @return IPC host ID.
*/
u32 synx_global_map_core_id(enum synx_core_id id);
/**
* synx_global_alloc_index - Allocate new global entry
*
* @param idx : Pointer to global table index (filled by function)
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_alloc_index(u32 *idx);
/**
* synx_global_init_coredata - Initialize the global entry for a new handle
*
* @param h_synx : Synx global handle
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_init_coredata(u32 h_synx);
/**
* synx_global_get_waiting_cores - Get list of all the waiting cores on a global entry
*
* Fills the cores array with true if the core is waiting, and
* false if not. Indexed through enum synx_core_id.
*
* @param idx : Global entry index
* @param cores : Array of boolean variables, one for each supported core.
* Array should contain SYNX_CORE_MAX entries.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_waiting_cores(u32 idx, bool *cores);
/**
* synx_global_set_waiting_core - Set core as a waiting core on global entry
*
* @param idx : Global entry index
* @param id : Core to be set as waiter
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_set_waiting_core(u32 idx, enum synx_core_id id);
/**
* synx_global_get_subscribed_cores - Get list of all the subscribed cores on a global entry
*
* Fills the cores array with true if the core is subscribed, and
* false if not. Indexed through enum synx_core_id.
*
* @param idx : Global entry index
* @param cores : Array of boolean variables, one for each supported core.
* Array should contain SYNX_CORE_MAX entries.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_subscribed_cores(u32 idx, bool *cores);
/**
* synx_global_set_subscribed_core - Set core as a subscriber core on global entry
*
* @param idx : Global entry index
* @param id : Core to be added as subscriber
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id);
/**
* synx_global_get_status - Get status of the global entry
*
* @param idx : Global entry index
*
* @return Global entry status
*/
u32 synx_global_get_status(u32 idx);
/**
* synx_global_test_status_set_wait - Check status and add core as waiter if not signaled
*
* This tests the status and registers the waiter in one atomic
* operation, avoiding a race with signaling: if the status check and
* the waiter registration were two separate operations, a signal
* arriving between them could miss sending the IPC notification.
*
* @param idx : Global entry index
* @param id : Core to be set as waiter (if unsignaled)
*
* @return Status of global entry idx.
*/
u32 synx_global_test_status_set_wait(u32 idx,
enum synx_core_id id);
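/*
 * Usage sketch for the wait path (illustrative only; handle_signaled()
 * is a hypothetical helper, not part of this driver):
 *
 *   status = synx_global_test_status_set_wait(idx, SYNX_CORE_APSS);
 *   if (status != SYNX_STATE_ACTIVE)
 *       handle_signaled(status);  // already signaled; no IPC will follow
 *   // else this core is now registered and will be notified over IPC
 */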
/**
* synx_global_update_status - Update status of the global entry
*
* The function also propagates the signal to any parent
* composite handles.
*
* @param idx : Global entry index
* @param status : Signaling status
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_update_status(u32 idx, u32 status);
/**
* synx_global_get_ref - Get additional reference on global entry
*
* @param idx : Global entry index
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_ref(u32 idx);
/**
* synx_global_put_ref - Release reference on global entry
*
* @param idx : Global entry index
*/
void synx_global_put_ref(u32 idx);
/**
* synx_global_get_parents - Get the global entry indices of all composite parents
*
* @param idx : Global entry index whose parents are requested
* @param parents : Array of global entry index of composite handles
* Filled by the function. Array should contain at least
* SYNX_GLOBAL_MAX_PARENTS entries.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_get_parents(u32 idx, u32 *parents);
/**
* synx_global_merge - Merge handles to form global handle
*
* Links each constituent entry to the composite parent entry.
*
* @param idx_list : List of global indexes to merge
* @param num_list : Number of handles in the list to merge
* @param p_idx : Global entry index allocated for the composite handle
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx);
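/*
 * Usage sketch (illustrative; assumes p_idx was already allocated and
 * initialized for the composite handle):
 *
 *   rc = synx_global_merge(idx_list, num_list, p_idx);
 *   if (rc != SYNX_SUCCESS)
 *       ...  // parent links added so far are rolled back internally
 */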
/**
* synx_global_recover - Recover handles subscribed by a specific core
*
* @param id : Core ID to clean up
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_recover(enum synx_core_id id);
/**
* synx_global_clean_cdsp_mem - Release handles created/used by CDSP
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_clean_cdsp_mem(void);
/**
* synx_global_dump_shared_memory - Prints the top entries of
* bitmap and table in global shared memory.
*
* @return SYNX_SUCCESS on success. Negative error on failure.
*/
int synx_global_dump_shared_memory(void);
#endif /* __SYNX_SHARED_MEM_H__ */

245
msm/synx/synx_private.h Normal file

@@ -0,0 +1,245 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_PRIVATE_H__
#define __SYNX_PRIVATE_H__
#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/hashtable.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
#include "synx_api.h"
#include "synx_global.h"
#define SYNX_MAX_OBJS SYNX_GLOBAL_MAX_OBJS
#define SYNX_NAME "synx"
#define SYNX_DEVICE_NAME "synx_device"
#define SYNX_WQ_CB_NAME "hiprio_synx_cb_queue"
#define SYNX_WQ_CB_THREADS 4
#define SYNX_WQ_CLEANUP_NAME "hiprio_synx_cleanup_queue"
#define SYNX_WQ_CLEANUP_THREADS 2
#define SYNX_MAX_NUM_BINDINGS 8
#define SYNX_OBJ_HANDLE_SHIFT SYNX_HANDLE_INDEX_BITS
#define SYNX_OBJ_CORE_ID_SHIFT (SYNX_OBJ_HANDLE_SHIFT+SYNX_HANDLE_CORE_BITS)
#define SYNX_OBJ_GLOBAL_FLAG_SHIFT (SYNX_OBJ_CORE_ID_SHIFT+SYNX_HANDLE_GLOBAL_FLAG_BIT)
#define SYNX_OBJ_HANDLE_MASK GENMASK_ULL(SYNX_OBJ_HANDLE_SHIFT-1, 0)
#define SYNX_OBJ_CORE_ID_MASK GENMASK_ULL(SYNX_OBJ_CORE_ID_SHIFT-1, SYNX_OBJ_HANDLE_SHIFT)
#define SYNX_OBJ_GLOBAL_FLAG_MASK \
GENMASK_ULL(SYNX_OBJ_GLOBAL_FLAG_SHIFT-1, SYNX_OBJ_CORE_ID_SHIFT)
#define MAX_TIMESTAMP_SIZE 32
#define SYNX_OBJ_NAME_LEN 64
#define SYNX_PAYLOAD_WORDS 4
#define SYNX_CREATE_IM_EX_RELEASE SYNX_CREATE_MAX_FLAGS
#define SYNX_CREATE_MERGED_FENCE (SYNX_CREATE_MAX_FLAGS << 1)
#define SYNX_MAX_REF_COUNTS 100
struct synx_bind_desc {
struct synx_external_desc_v2 external_desc;
void *external_data;
};
struct error_node {
char timestamp[MAX_TIMESTAMP_SIZE];
u64 session;
u32 client_id;
u32 h_synx;
s32 error_code;
struct list_head node;
};
struct synx_entry_32 {
u32 key;
void *data;
struct hlist_node node;
};
struct synx_entry_64 {
u64 key;
u32 data[2];
struct kref refcount;
struct hlist_node node;
};
struct synx_map_entry {
struct synx_coredata *synx_obj;
struct kref refcount;
u32 flags;
u32 key;
struct work_struct dispatch;
struct hlist_node node;
};
struct synx_fence_entry {
u32 g_handle;
u32 l_handle;
u64 key;
struct hlist_node node;
};
struct synx_kernel_payload {
u32 h_synx;
u32 status;
void *data;
synx_user_callback_t cb_func;
synx_user_callback_t cancel_cb_func;
};
struct synx_cb_data {
struct synx_session *session;
u32 idx;
u32 status;
struct work_struct cb_dispatch;
struct list_head node;
};
struct synx_client_cb {
bool is_valid;
u32 idx;
struct synx_client *client;
struct synx_kernel_payload kernel_cb;
struct list_head node;
};
struct synx_registered_ops {
char name[SYNX_OBJ_NAME_LEN];
struct bind_operations ops;
enum synx_bind_client_type type;
bool valid;
};
struct synx_cleanup_cb {
void *data;
struct work_struct cb_dispatch;
};
enum synx_signal_handler {
SYNX_SIGNAL_FROM_CLIENT = 0x1,
SYNX_SIGNAL_FROM_FENCE = 0x2,
SYNX_SIGNAL_FROM_IPC = 0x4,
SYNX_SIGNAL_FROM_CALLBACK = 0x8,
};
struct synx_signal_cb {
u32 handle;
u32 status;
u64 ext_sync_id;
struct synx_coredata *synx_obj;
enum synx_signal_handler flag;
struct dma_fence_cb fence_cb;
struct work_struct cb_dispatch;
};
struct synx_coredata {
char name[SYNX_OBJ_NAME_LEN];
struct dma_fence *fence;
struct mutex obj_lock;
struct kref refcount;
u32 type;
u32 num_bound_synxs;
struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS];
struct list_head reg_cbs_list;
u32 global_idx;
u32 map_count;
struct synx_signal_cb *signal_cb;
};
struct synx_client;
struct synx_device;
struct synx_handle_coredata {
struct synx_client *client;
struct synx_coredata *synx_obj;
void *map_entry;
struct kref refcount;
u32 key;
u32 rel_count;
struct work_struct dispatch;
struct hlist_node node;
};
struct synx_client {
u32 type;
bool active;
struct synx_device *device;
char name[SYNX_OBJ_NAME_LEN];
u64 id;
u64 dma_context;
struct kref refcount;
struct mutex event_q_lock;
struct list_head event_q;
wait_queue_head_t event_wq;
DECLARE_BITMAP(cb_bitmap, SYNX_MAX_OBJS);
struct synx_client_cb cb_table[SYNX_MAX_OBJS];
DECLARE_HASHTABLE(handle_map, 8);
spinlock_t handle_map_lock;
struct work_struct dispatch;
struct hlist_node node;
};
struct synx_native {
spinlock_t metadata_map_lock;
DECLARE_HASHTABLE(client_metadata_map, 8);
spinlock_t fence_map_lock;
DECLARE_HASHTABLE(fence_map, 10);
spinlock_t global_map_lock;
DECLARE_HASHTABLE(global_map, 10);
spinlock_t local_map_lock;
DECLARE_HASHTABLE(local_map, 8);
spinlock_t csl_map_lock;
DECLARE_HASHTABLE(csl_fence_map, 8);
DECLARE_BITMAP(bitmap, SYNX_MAX_OBJS);
};
struct synx_cdsp_ssr {
u64 ssrcnt;
void *handle;
struct notifier_block nb;
};
struct synx_device {
struct cdev cdev;
dev_t dev;
struct class *class;
struct synx_native *native;
struct workqueue_struct *wq_cb;
struct workqueue_struct *wq_cleanup;
struct mutex vtbl_lock;
struct synx_registered_ops bind_vtbl[SYNX_MAX_BIND_TYPES];
struct dentry *debugfs_root;
struct list_head error_list;
struct mutex error_lock;
struct synx_cdsp_ssr cdsp_ssr;
};
int synx_signal_core(struct synx_coredata *synx_obj,
u32 status,
bool cb_signal,
s32 ext_sync_id);
int synx_ipc_callback(uint32_t client_id,
int64_t data, void *priv);
void synx_signal_handler(struct work_struct *cb_dispatch);
int synx_native_release_core(struct synx_client *session,
u32 h_synx);
int synx_bind(struct synx_session *session,
u32 h_synx,
struct synx_external_desc_v2 external_sync);
#endif /* __SYNX_PRIVATE_H__ */

1525
msm/synx/synx_util.c Normal file

File diff suppressed because it is too large

181
msm/synx/synx_util.h Normal file

@@ -0,0 +1,181 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SYNX_UTIL_H__
#define __SYNX_UTIL_H__
#include "synx_api.h"
#include "synx_private.h"
extern struct synx_device *synx_dev;
extern void synx_fence_callback(struct dma_fence *fence,
struct dma_fence_cb *cb);
extern int synx_native_signal_fence(struct synx_coredata *synx_obj,
u32 status);
static inline bool synx_util_is_valid_bind_type(u32 type)
{
if (type < SYNX_MAX_BIND_TYPES)
return true;
return false;
}
static inline bool synx_util_is_global_handle(u32 h_synx)
{
return (h_synx & SYNX_OBJ_GLOBAL_FLAG_MASK) ? true : false;
}
static inline u32 synx_util_get_object_type(
struct synx_coredata *synx_obj)
{
return synx_obj ? synx_obj->type : 0;
}
static inline bool synx_util_is_merged_object(
struct synx_coredata *synx_obj)
{
if (synx_obj &&
(synx_obj->type & SYNX_CREATE_MERGED_FENCE))
return true;
return false;
}
static inline bool synx_util_is_global_object(
struct synx_coredata *synx_obj)
{
if (synx_obj &&
(synx_obj->type & SYNX_CREATE_GLOBAL_FENCE))
return true;
return false;
}
static inline bool synx_util_is_external_object(
struct synx_coredata *synx_obj)
{
if (synx_obj &&
(synx_obj->type & SYNX_CREATE_DMA_FENCE))
return true;
return false;
}
static inline u32 synx_util_map_params_to_type(u32 flags)
{
if (flags & SYNX_CREATE_CSL_FENCE)
return SYNX_TYPE_CSL;
return SYNX_MAX_BIND_TYPES;
}
static inline u32 synx_util_global_idx(u32 h_synx)
{
return (h_synx & SYNX_OBJ_HANDLE_MASK);
}
/* coredata memory functions */
void synx_util_get_object(struct synx_coredata *synx_obj);
void synx_util_put_object(struct synx_coredata *synx_obj);
void synx_util_object_destroy(struct synx_coredata *synx_obj);
static inline struct synx_coredata *synx_util_obtain_object(
struct synx_handle_coredata *synx_data)
{
if (IS_ERR_OR_NULL(synx_data))
return NULL;
return synx_data->synx_obj;
}
/* global/local map functions */
struct synx_map_entry *synx_util_insert_to_map(struct synx_coredata *synx_obj,
u32 h_synx, u32 flags);
struct synx_map_entry *synx_util_get_map_entry(u32 h_synx);
void synx_util_release_map_entry(struct synx_map_entry *map_entry);
/* fence map functions */
int synx_util_insert_fence_entry(struct synx_fence_entry *entry, u32 *h_synx,
u32 global);
u32 synx_util_get_fence_entry(u64 key, u32 global);
void synx_util_release_fence_entry(u64 key);
/* coredata initialize functions */
int synx_util_init_coredata(struct synx_coredata *synx_obj,
struct synx_create_params *params,
struct dma_fence_ops *ops,
u64 dma_context);
int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
struct dma_fence **fences,
struct synx_merge_params *params,
u32 num_objs,
u64 dma_context);
/* handle related functions */
int synx_alloc_global_handle(u32 *new_synx);
int synx_alloc_local_handle(u32 *new_synx);
long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size);
int synx_util_init_handle(struct synx_client *client, struct synx_coredata *obj,
u32 *new_h_synx,
void *map_entry);
u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx);
/* callback related functions */
int synx_util_alloc_cb_entry(struct synx_client *client,
struct synx_kernel_payload *data,
u32 *cb_idx);
int synx_util_clear_cb_entry(struct synx_client *client,
struct synx_client_cb *cb);
void synx_util_default_user_callback(u32 h_synx, int status, void *data);
void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 state);
void synx_util_cb_dispatch(struct work_struct *cb_dispatch);
/* external fence functions */
int synx_util_activate(struct synx_coredata *synx_obj);
int synx_util_add_callback(struct synx_coredata *synx_obj, u32 h_synx);
/* merge related helper functions */
s32 synx_util_merge_error(struct synx_client *client, u32 *h_synxs, u32 num_objs);
int synx_util_validate_merge(struct synx_client *client, u32 *h_synxs, u32 num_objs,
struct dma_fence ***fences,
u32 *fence_cnt);
/* coredata status functions */
u32 synx_util_get_object_status(struct synx_coredata *synx_obj);
u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj);
/* client handle map related functions */
struct synx_handle_coredata *synx_util_acquire_handle(struct synx_client *client,
u32 h_synx);
void synx_util_release_handle(struct synx_handle_coredata *synx_data);
int synx_util_update_handle(struct synx_client *client, u32 h_synx, u32 sync_id,
u32 type, struct synx_handle_coredata **handle);
/* client memory handler functions */
struct synx_client *synx_get_client(struct synx_session *session);
void synx_put_client(struct synx_client *client);
/* error log functions */
void synx_util_generate_timestamp(char *timestamp, size_t size);
void synx_util_log_error(u32 id, u32 h_synx, s32 err);
/* external fence map functions */
int synx_util_save_data(void *fence, u32 flags, u32 data);
struct synx_entry_64 *synx_util_retrieve_data(void *fence, u32 type);
void synx_util_remove_data(void *fence, u32 type);
/* misc */
void synx_util_map_import_params_to_create(
struct synx_import_indv_params *params,
struct synx_create_params *c_params);
struct bind_operations *synx_util_get_bind_ops(u32 type);
u32 synx_util_map_client_id_to_core(enum synx_client_id id);
#endif /* __SYNX_UTIL_H__ */

19
synx_kernel_board.mk Normal file

@@ -0,0 +1,19 @@
# Build synx kernel driver
TARGET_SYNX_ENABLE := false
ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
TARGET_SYNX_ENABLE := true
endif
else
TARGET_SYNX_ENABLE := true
endif
#
ifeq ($(TARGET_SYNX_ENABLE), true)
ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/synx-driver.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/synx-driver.ko
endif
endif

12
synx_kernel_product.mk Normal file
View File

@@ -0,0 +1,12 @@
TARGET_SYNX_ENABLE := false
ifeq ($(TARGET_KERNEL_DLKM_DISABLE),true)
ifeq ($(TARGET_KERNEL_DLKM_SYNX_OVERRIDE),true)
TARGET_SYNX_ENABLE := true
endif
else
TARGET_SYNX_ENABLE := true
endif
ifeq ($(TARGET_SYNX_ENABLE), true)
PRODUCT_PACKAGES += synx-driver.ko
endif
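# Usage sketch (paths illustrative): a device target would typically pull
# these scripts in from its board/product configuration, e.g.
#   include vendor/qcom/opensource/synx-kernel/synx_kernel_board.mk
#   $(call inherit-product, vendor/qcom/opensource/synx-kernel/synx_kernel_product.mk)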