Merge remote-tracking branch 'origin/display-kernel.lnx.5.15' into display-kernel.lnx.1.0

* origin/display-kernel.lnx.5.15:
  mm-drivers: hw_fence: add timestamp to the queue
  mm-drivers: add support for build.sh techpack display_tp
  mm-drivers: hw_fence: move mem barrier before mem read
  mm-drivers: spec_fence: increasing device_available
  mm-drivers: hw_fence: enable hw-fence driver based on cmdline var
  mm-drivers: sync: export sync_fence module symbols
  mm-drivers: hw-fence: add hardware fence driver validation ioctls
  mm-drivers: add support for compiling out mm driver modules
  mm-drivers: hw_fence: avoid hw fences creation until fctl ready
  mm-drivers: hw_fence: populate payload size in hfi header
  mm-drivers: hw_fence: avoid compiling hw_fence driver for taro
  mm-drivers: hw_fence: Add support for hw-fence driver
  mm-drivers: sync: resolve compilation of sync fence driver
  mm-drivers: sync: add api to wait for sync fence bind
  mm-drivers: sync-fence: add changes to serialize fence operations
  mm-drivers: sync_fence: avoid compiling spec_fence driver for taro
  mm-drivers: msm_ext_display: export msm-ext-display module symbols
  mm-drivers: enable mm-driver modules compilation
  mm-drivers: msm_ext_display: add snapshot
  mm-drivers: sync-fence: add sync fence driver snapshot

Change-Id: Id3fde6d894d2e73243c0a50aab018067431f211d
Signed-off-by: Ashwin Pillai <quic_ashwpill@quicinc.com>
This commit is contained in:
Ashwin Pillai
2022-08-08 23:50:30 -04:00
Commit 2e0acac216
30 changed files with 7005 additions and 0 deletions

36
Android.bp Normale Datei
Datei anzeigen

@@ -0,0 +1,36 @@
// Source globs for the UAPI headers exported by the sync_fence driver.
headers_src = [
    "sync_fence/include/uapi/*/**/*.h",
]

// Sanitized header names produced by the genrule below.
mm_drivers_headers_out = [
    "sync_fence/qcom_sync_file.h",
]

// Extra flag forwarded to mm_drivers_kernel_headers.py. The trailing space is
// intentional: the cmd string concatenation below adds no separators.
mm_drivers_kernel_headers_verbose = "--verbose "

// Runs the kernel's headers_install.sh/unifdef over the UAPI headers to
// generate cleaned, userspace-visible copies under $(genDir).
genrule {
    name: "qti_generate_mm_drivers_kernel_headers",
    tools: [
        "headers_install.sh",
        "unifdef"
    ],
    tool_files: [
        "mm_drivers_kernel_headers.py",
    ],
    srcs: headers_src,
    cmd: "python3 $(location mm_drivers_kernel_headers.py) " +
        mm_drivers_kernel_headers_verbose +
        "--header_arch arm64 " +
        "--gen_dir $(genDir) " +
        "--mm_drivers_include_uapi $(locations sync_fence/include/uapi/*/**/*.h) " +
        "--unifdef $(location unifdef) " +
        "--headers_install $(location headers_install.sh)",
    out: mm_drivers_headers_out,
}

// Header-only library that vendor (and recovery) components depend on to
// pick up the generated mm-drivers UAPI headers.
cc_library_headers {
    name: "qti_mm_drivers_kernel_headers",
    generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
    export_generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
    vendor: true,
    recovery_available: true
}

16
Android.mk Normale Datei
Datei anzeigen

@@ -0,0 +1,16 @@
# Root of the mm-drivers tree (directory containing this makefile).
MM_DRIVER_PATH := $(call my-dir)

# Build the mm-drivers kernel modules (DLKMs) by default; a target may opt
# out via the two variables checked below.
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
# DLKMs are globally disabled; honour that unless the target explicitly
# overrides the decision for the mm drivers.
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif

ifeq ($(MM_DRV_DLKM_ENABLE), true)
include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk
# hw_fence and sync_fence are not supported on taro targets.
ifneq ($(TARGET_BOARD_PLATFORM), taro)
include $(MM_DRIVER_PATH)/hw_fence/Android.mk
include $(MM_DRIVER_PATH)/sync_fence/Android.mk
endif
endif

7
config/kalamammdrivers.conf Normale Datei
Datei anzeigen

@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
# Copyright (c) 2020, The Linux Foundation. All rights reserved.

# Kconfig-style switches consumed by the mm-drivers Kbuild files (included
# from each driver's Kbuild; exported so sub-make invocations see them too).
export CONFIG_MSM_EXT_DISPLAY=y
export CONFIG_QCOM_SPEC_SYNC=y
export CONFIG_QTI_HW_FENCE=y

Datei anzeigen

@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

/*
 * C-side mirror of config/kalamammdrivers.conf; force-included (via
 * LINUXINCLUDE in the Kbuild files) so driver sources see the same CONFIG_*
 * switches the makefiles enable. Keep both files in sync.
 */
#define CONFIG_MSM_EXT_DISPLAY 1
#define CONFIG_QCOM_SPEC_SYNC 1
#define CONFIG_QTI_HW_FENCE 1

41
hw_fence/Android.mk Normale Datei
Datei anzeigen

@@ -0,0 +1,41 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)

ifneq ($(findstring opensource,$(LOCAL_PATH)),)
MSM_HW_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/hw_fence
endif # opensource

DLKM_DIR := $(TOP)/device/qcom/common/dlkm

# Rebuild when any file under this driver changes.
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)

###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
# NOTE(review): MSM_HW_FENCE_ROOT is set here without a trailing slash, but
# hw_fence/Kbuild concatenates it as $(MSM_HW_FENCE_ROOT)hw_fence/include/ --
# confirm the two agree on whether the value ends in '/'.
KBUILD_OPTIONS := MSM_HW_FENCE_ROOT=$(MSM_HW_FENCE_BLD_DIR)
KBUILD_OPTIONS += MODNAME=msm_hw_fence
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)

###########################################################
# Install the module's Module.symvers so dependent DLKMs can resolve the
# symbols this driver exports.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := hw-fence-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk

###########################################################
# Build and install the msm_hw_fence.ko kernel module itself.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm_hw_fence.ko
LOCAL_MODULE_KBUILD_NAME := msm_hw_fence.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

20
hw_fence/Kbuild Normale Datei
Datei anzeigen

@@ -0,0 +1,20 @@
# SPDX-License-Identifier: GPL-2.0-only
KDIR := $(TOP)/kernel_platform/msm-kernel

# Pull in the exported CONFIG_* switches shared by all mm drivers.
include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf

# Force-include the C mirror of the config and add the driver include path.
# NOTE(review): there is no '/' between $(MSM_HW_FENCE_ROOT) and
# 'hw_fence/' -- this only works if MSM_HW_FENCE_ROOT carries a trailing
# slash (as the Makefile sets it); confirm the value passed from Android.mk
# matches.
LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \
	-I$(MSM_HW_FENCE_ROOT)hw_fence/include/

ifdef CONFIG_QTI_HW_FENCE
obj-m += msm_hw_fence.o
msm_hw_fence-y := src/msm_hw_fence.o \
	src/hw_fence_drv_priv.o \
	src/hw_fence_drv_utils.o \
	src/hw_fence_drv_debug.o \
	src/hw_fence_drv_ipc.o

# Validation ioctls are only compiled when debugfs is available.
msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o

CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
endif

14
hw_fence/Makefile Normale Datei
Datei anzeigen

@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
KBUILD_OPTIONS += MSM_HW_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../
all: modules
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions

Datei anzeigen

@@ -0,0 +1,194 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_DEBUG
#define __HW_FENCE_DRV_DEBUG
#include "hw_fence_drv_ipc.h"
#define HW_FENCE_NAME_SIZE 64

/**
 * enum hw_fence_drv_prio - debug-message categories; used as a bitmask in
 *                          msm_hw_fence_debug_level.
 */
enum hw_fence_drv_prio {
	HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */
	HW_FENCE_LOW = 0x000002, /* Low density debug messages */
	HW_FENCE_INFO = 0x000004, /* Informational prints */
	HW_FENCE_INIT = 0x000008, /* Initialization logs */
	HW_FENCE_QUEUE = 0x000010, /* Queue logs */
	HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */
	HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */
	HW_FENCE_PRINTK = 0x010000, /* Route enabled categories to printk */
};

/* Bitmask of enabled hw_fence_drv_prio categories. */
extern u32 msm_hw_fence_debug_level;

/*
 * Print via pr_err() only when both the message's category bit and
 * HW_FENCE_PRINTK are set in msm_hw_fence_debug_level.  The outer if is
 * braced to remove the dangling-else hazard the original nested ifs had.
 */
#define dprintk(__level, __fmt, ...) \
	do { \
		if (msm_hw_fence_debug_level & __level) { \
			if (msm_hw_fence_debug_level & HW_FENCE_PRINTK) \
				pr_err(__fmt, ##__VA_ARGS__); \
		} \
	} while (0)
/* Unconditional error print, tagged with caller location and return address. */
#define HWFNC_ERR(fmt, ...) \
	pr_err("[hwfence:%s:%d][err][%pS] "fmt, __func__, __LINE__, \
		__builtin_return_address(0), ##__VA_ARGS__)

/*
 * Per-category debug prints.  Each expands to dprintk(), so a message is
 * emitted only when its category bit (and HW_FENCE_PRINTK) is set in
 * msm_hw_fence_debug_level.
 */
#define HWFNC_DBG_H(fmt, ...) \
	dprintk(HW_FENCE_HIGH, "[hwfence:%s:%d][dbgh]"fmt, __func__, __LINE__, ##__VA_ARGS__)

#define HWFNC_DBG_L(fmt, ...) \
	dprintk(HW_FENCE_LOW, "[hwfence:%s:%d][dbgl]"fmt, __func__, __LINE__, ##__VA_ARGS__)

#define HWFNC_DBG_INFO(fmt, ...) \
	dprintk(HW_FENCE_INFO, "[hwfence:%s:%d][dbgi]"fmt, __func__, __LINE__, ##__VA_ARGS__)

#define HWFNC_DBG_INIT(fmt, ...) \
	dprintk(HW_FENCE_INIT, "[hwfence:%s:%d][dbg]"fmt, __func__, __LINE__, ##__VA_ARGS__)

#define HWFNC_DBG_Q(fmt, ...) \
	dprintk(HW_FENCE_QUEUE, "[hwfence:%s:%d][dbgq]"fmt, __func__, __LINE__, ##__VA_ARGS__)

#define HWFNC_DBG_LUT(fmt, ...) \
	dprintk(HW_FENCE_LUT, "[hwfence:%s:%d][dbglut]"fmt, __func__, __LINE__, ##__VA_ARGS__)

#define HWFNC_DBG_IRQ(fmt, ...) \
	dprintk(HW_FENCE_IRQ, "[hwfence:%s:%d][dbgirq]"fmt, __func__, __LINE__, ##__VA_ARGS__)

/* Unconditional warning print, tagged with caller location and return address. */
#define HWFNC_WARN(fmt, ...) \
	pr_warn("[hwfence:%s:%d][warn][%pS] "fmt, __func__, __LINE__, \
		__builtin_return_address(0), ##__VA_ARGS__)
/* Create the driver's debugfs nodes. */
int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data);

#if IS_ENABLED(CONFIG_DEBUG_FS)

/* Run the loopback validation sequence for the given validation client. */
int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id);

/* fops backing the debugfs validation nodes. */
extern const struct file_operations hw_sync_debugfs_fops;

/* IPCC (client id, signal id) pair used to emulate a client's signal. */
struct hw_fence_out_clients_map {
	int ipc_client_id; /* ipc client id for the hw fence client */
	int ipc_signal_id; /* ipc signal id for the hw fence client */
};

/* These signals are the ones that the actual clients should be triggering, hw-fence driver
 * does not need to have knowledge of these signals. Adding them here for debugging purposes.
 * Only fence controller and the clients know these id's, since these
 * are to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller'
 * The index of this struct must match the enum hw_fence_client_id
 */
static const struct hw_fence_out_clients_map
		dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = {
	{HW_FENCE_IPC_CLIENT_ID_APPS, 0}, /* CTRL_LOOPBACK */
	{HW_FENCE_IPC_CLIENT_ID_GPU, 0}, /* CTX0 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 2}, /* CTL0 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 4}, /* CTL1 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 6}, /* CTL2 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 8}, /* CTL3 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 10}, /* CTL4 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 12}, /* CTL5 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 21}, /* VAL0 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 22}, /* VAL1 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 23}, /* VAL2 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 24}, /* VAL3 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 25}, /* VAL4 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 26}, /* VAL5 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 27}, /* VAL6 */
};
/**
 * struct hw_dma_fence - fences created by hw-fence for debugging.
 * @base: base dma-fence structure, this must remain at beginning of the struct.
 * @name: name of each fence.
 * @client_handle: handle for the client owner of this fence, this is returned by the hw-fence
 *                 driver after a successful registration of the client and used by this fence
 *                 during release.
 */
struct hw_dma_fence {
	struct dma_fence base;
	char name[HW_FENCE_NAME_SIZE];
	void *client_handle;
};

/* Map a generic dma_fence back to its enclosing hw_dma_fence wrapper. */
static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence)
{
	return container_of(fence, struct hw_dma_fence, base);
}
/*
 * Free the spinlocks and hw_dma_fence wrappers for entries [0..i] of the
 * given arrays, then release the arrays themselves.  Used on error paths
 * while batch-creating debug fences.
 */
static inline void _cleanup_fences(int i, struct dma_fence **fences, spinlock_t **fences_lock)
{
	int idx;

	for (idx = i; idx >= 0; idx--) {
		struct hw_dma_fence *hw_fence = to_hw_dma_fence(fences[idx]);

		kfree(fences_lock[idx]);
		kfree(hw_fence);
	}

	kfree(fences_lock);
	kfree(fences);
}
static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
return hw_dma_fence->name;
}
static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
return hw_dma_fence->name;
}
static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence)
{
return true;
}
/* Destroy the backing hw-fence of a debug fence via its owner client handle. */
static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence)
{
	if (IS_ERR_OR_NULL(hw_dma_fence->client_handle)) {
		HWFNC_ERR("invalid hwfence data, won't release hw_fence!\n");
		return;
	}

	/* release hw-fence */
	if (msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base))
		HWFNC_ERR("failed to release hw_fence!\n");
}

/* dma_fence_ops release callback: tear down the backing hw-fence (when it
 * was enabled) and free the lock and wrapper allocated at create time.
 */
static void hw_fence_dbg_release(struct dma_fence *fence)
{
	struct hw_dma_fence *hw_dma_fence;

	if (!fence)
		return;

	HWFNC_DBG_H("release backing fence %pK\n", fence);
	hw_dma_fence = to_hw_dma_fence(fence);

	if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
		_hw_fence_release(hw_dma_fence);

	kfree(fence->lock);
	kfree(hw_dma_fence);
}
static struct dma_fence_ops hw_fence_dbg_ops = {
.get_driver_name = hw_fence_dbg_get_driver_name,
.get_timeline_name = hw_fence_dbg_get_timeline_name,
.enable_signaling = hw_fence_dbg_enable_signaling,
.wait = dma_fence_default_wait,
.release = hw_fence_dbg_release,
};
#endif /* CONFIG_DEBUG_FS */
#endif /* __HW_FENCE_DRV_DEBUG */

Datei anzeigen

@@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_IPC_H
#define __HW_FENCE_DRV_IPC_H
/* IPCC client ids used by the hw-fence driver. */
#define HW_FENCE_IPC_CLIENT_ID_APPS 8
#define HW_FENCE_IPC_CLIENT_ID_GPU 9
#define HW_FENCE_IPC_CLIENT_ID_DPU 25

/* Compute-L1 IPCC protocol id per target. */
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA 2

/* IPCC hw revisions, used to identify the target. */
#define HW_FENCE_IPCC_HW_REV_100 0x00010000 /* Lahaina */
#define HW_FENCE_IPCC_HW_REV_110 0x00010100 /* Waipio */
#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kailua */

/*
 * Register offsets within the IPCC block for protocol 'p' / client 'c'.
 * Every macro argument is parenthesized so that expression arguments
 * (e.g. 'id + 1') expand with the intended precedence.
 */
#define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) ((base) + (0x40000*(p)) + (0x1000*(c)))
#define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) ((base) + 0x8 + (0x40000*(p)) + (0x1000*(c)))
#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \
	((base) + 0x14 + (0x40000*(p)) + (0x1000*(c)))
#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base) + 0xc + (0x40000*(p)) + (0x1000*(c)))
/**
 * hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair.
 * @drv_data: driver data.
 * @tx_client_id: ipc client id that sends the ipc signal.
 * @rx_client_id: ipc client id that receives the ipc signal.
 * @signal_id: signal id to send.
 *
 * This API triggers the ipc 'signal_id' from the 'tx_client_id' to the 'rx_client_id'
 */
void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
	u32 tx_client_id, u32 rx_client_id, u32 signal_id);

/**
 * hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for hw-fence driver.
 * @drv_data: driver data.
 *
 * Return: 0 on success or negative errno (-EINVAL)
 */
int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data);

#ifdef HW_DPU_IPCC
/**
 * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client.
 * @drv_data: driver data.
 *
 * Return: 0 on success or negative errno (-EINVAL)
 */
int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data);
#endif /* HW_DPU_IPCC */

/**
 * hw_fence_ipcc_get_client_id() - Returns the ipc client id that corresponds to the hw fence
 *                                 driver client.
 * @drv_data: driver data.
 * @client_id: hw fence driver client id.
 *
 * The ipc client id returned by this API is used by the hw fence driver when signaling the fence.
 *
 * Return: client_id on success or negative errno (-EINVAL)
 */
int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id);

/**
 * hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence
 *                                 driver client.
 * @drv_data: driver data.
 * @client_id: hw fence driver client id.
 *
 * The ipc signal id returned by this API is used by the hw fence driver when signaling the fence.
 *
 * Return: client_id on success or negative errno (-EINVAL)
 */
int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id);

/**
 * hw_fence_ipcc_needs_rxq_update() - Returns bool to indicate if client uses rx-queue.
 * @drv_data: driver data.
 * @client_id: hw fence driver client id.
 *
 * Return: true if client needs to update rxq, false otherwise
 */
bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id);
#endif /* __HW_FENCE_DRV_IPC_H */

Datei anzeigen

@@ -0,0 +1,412 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_INTERNAL_H
#define __HW_FENCE_DRV_INTERNAL_H
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/soc/qcom/msm_hw_fence.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>
/* Add define only for platforms that support IPCC in dpu-hw */
#define HW_DPU_IPCC 1

/* max u64 to indicate invalid fence */
#define HW_FENCE_INVALID_PARENT_FENCE (~0ULL)

/* hash algorithm constants */
#define HW_FENCE_HASH_A_MULT 4969 /* a multiplier for Hash algorithm */
#define HW_FENCE_HASH_C_MULT 907 /* c multiplier for Hash algorithm */

/* number of queues per type (i.e. ctrl or client queues) */
#define HW_FENCE_CTRL_QUEUES 2 /* Rx and Tx Queues */
#define HW_FENCE_CLIENT_QUEUES 2 /* Rx and Tx Queues */

/* hfi headers calculation */
#define HW_FENCE_HFI_TABLE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_table_header))
#define HW_FENCE_HFI_QUEUE_HEADER_SIZE (sizeof(struct msm_hw_fence_hfi_queue_header))
#define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
	(HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES))
#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
	(HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CLIENT_QUEUES))

/*
 * Max Payload size is the biggest size of the message that we can have in the CTRL queue
 * in this case the max message is calculated like following, using 32-bits elements:
 * 1 header + 1 msg-type + 1 client_id + 2 hash + 1 error
 */
#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE ((1 + 1 + 1 + 2 + 1) * sizeof(u32))
#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE
#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload))

/* Locks area for all the clients */
#define HW_FENCE_MEM_LOCKS_SIZE (sizeof(u64) * (HW_FENCE_CLIENT_MAX - 1))

/* Queue-type selectors (see hw_fence_update_queue()/hw_fence_read_queue()). */
#define HW_FENCE_TX_QUEUE 1
#define HW_FENCE_RX_QUEUE 2

/* ClientID for the internal join fence, this is used by the framework when creating a join-fence */
#define HW_FENCE_JOIN_FENCE_CLIENT_ID (~(u32)0)

/**
 * msm hw fence flags:
 * MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled
 */
#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0)

/**
 * MSM_HW_FENCE_MAX_JOIN_PARENTS:
 * Maximum number of parents that a fence can have for a join-fence
 */
#define MSM_HW_FENCE_MAX_JOIN_PARENTS 3

/* Operation tags used when tracing fence look-ups in the global table. */
enum hw_fence_lookup_ops {
	HW_FENCE_LOOKUP_OP_CREATE = 0x1,
	HW_FENCE_LOOKUP_OP_DESTROY,
	HW_FENCE_LOOKUP_OP_CREATE_JOIN,
	HW_FENCE_LOOKUP_OP_FIND_FENCE
};
/**
 * enum hw_fence_loopback_id - Enum with the clients having a loopback signal (i.e AP to AP signal).
 * HW_FENCE_LOOPBACK_DPU_CTL_0: dpu client 0. Used in platforms with no dpu-ipc.
 * HW_FENCE_LOOPBACK_DPU_CTL_1: dpu client 1. Used in platforms with no dpu-ipc.
 * HW_FENCE_LOOPBACK_DPU_CTL_2: dpu client 2. Used in platforms with no dpu-ipc.
 * HW_FENCE_LOOPBACK_DPU_CTL_3: dpu client 3. Used in platforms with no dpu-ipc.
 * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc.
 * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc.
 * HW_FENCE_LOOPBACK_GFX_CTX_0: gfx client 0. Used in platforms with no gmu support.
 * HW_FENCE_LOOPBACK_VAL_0: debug validation client 0.
 * HW_FENCE_LOOPBACK_VAL_1: debug validation client 1.
 * HW_FENCE_LOOPBACK_VAL_2: debug validation client 2.
 * HW_FENCE_LOOPBACK_VAL_3: debug validation client 3.
 * HW_FENCE_LOOPBACK_VAL_4: debug validation client 4.
 * HW_FENCE_LOOPBACK_VAL_5: debug validation client 5.
 * HW_FENCE_LOOPBACK_VAL_6: debug validation client 6.
 */
enum hw_fence_loopback_id {
	HW_FENCE_LOOPBACK_DPU_CTL_0,
	HW_FENCE_LOOPBACK_DPU_CTL_1,
	HW_FENCE_LOOPBACK_DPU_CTL_2,
	HW_FENCE_LOOPBACK_DPU_CTL_3,
	HW_FENCE_LOOPBACK_DPU_CTL_4,
	HW_FENCE_LOOPBACK_DPU_CTL_5,
	HW_FENCE_LOOPBACK_GFX_CTX_0,
#if IS_ENABLED(CONFIG_DEBUG_FS)
	HW_FENCE_LOOPBACK_VAL_0,
	HW_FENCE_LOOPBACK_VAL_1,
	HW_FENCE_LOOPBACK_VAL_2,
	HW_FENCE_LOOPBACK_VAL_3,
	HW_FENCE_LOOPBACK_VAL_4,
	HW_FENCE_LOOPBACK_VAL_5,
	HW_FENCE_LOOPBACK_VAL_6,
#endif /* CONFIG_DEBUG_FS */
	HW_FENCE_LOOPBACK_MAX,
};

/* Number of dpu loopback clients (CTL_0 .. CTL_5). */
#define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1)
/**
 * struct msm_hw_fence_queue - Structure holding the data of the hw fence queues.
 * @va_queue: pointer to the virtual address of the queue elements
 * @q_size_bytes: size of the queue
 * @va_header: pointer to the hfi header virtual address
 * @pa_queue: physical address of the queue
 */
struct msm_hw_fence_queue {
	void *va_queue;
	u32 q_size_bytes;
	void *va_header;
	phys_addr_t pa_queue;
};

/**
 * struct msm_hw_fence_client - Structure holding the per-Client allocated resources.
 * @client_id: id of the client
 * @mem_descriptor: hfi header memory descriptor
 * @queues: queues descriptor (one Rx and one Tx queue)
 * @ipc_signal_id: id of the signal to be triggered for this client
 * @ipc_client_id: id of the ipc client for this hw fence driver client
 * @update_rxq: bool to indicate if client uses rx-queue
 * @wait_queue: wait queue for the validation clients (debugfs builds only)
 * @val_signal: doorbell flag to signal the validation clients in the wait queue
 */
struct msm_hw_fence_client {
	enum hw_fence_client_id client_id;
	struct msm_hw_fence_mem_addr mem_descriptor;
	struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES];
	int ipc_signal_id;
	int ipc_client_id;
	bool update_rxq;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	wait_queue_head_t wait_queue;
	atomic_t val_signal;
#endif /* CONFIG_DEBUG_FS */
};
/**
 * struct msm_hw_fence_mem_data - Structure holding internal memory attributes
 *
 * @attrs: attributes for the memory allocation
 */
struct msm_hw_fence_mem_data {
	unsigned long attrs;
};

/**
 * struct msm_hw_fence_dbg_data - Structure holding debugfs data
 *
 * @root: debugfs root
 * @entry_rd: flag to indicate if debugfs dumps a single line or table
 * @context_rd: debugfs setting to indicate which context id to dump
 * @seqno_rd: debugfs setting to indicate which seqno to dump
 * @hw_fence_sim_release_delay: delay in micro seconds for the debugfs node that simulates the
 *                              hw-fences behavior, to release the hw-fences
 * @create_hw_fences: boolean to continuously create hw-fences within debugfs
 * @clients_list: list of debug clients registered
 * @clients_list_lock: lock to synchronize access to the clients list
 */
struct msm_hw_fence_dbg_data {
	struct dentry *root;
	bool entry_rd;
	u64 context_rd;
	u64 seqno_rd;
	u32 hw_fence_sim_release_delay;
	bool create_hw_fences;
	struct list_head clients_list;
	struct mutex clients_list_lock;
};
/**
 * struct hw_fence_driver_data - Structure holding internal hw-fence driver data
 *
 * @dev: device driver pointer
 * @resources_ready: value set by driver at end of probe, once all resources are ready
 * @hw_fence_table_entries: total number of hw-fences in the global table
 * @hw_fence_mem_fences_table_size: hw-fences global table total size
 * @hw_fence_queue_entries: total number of entries that can be available in the queue
 * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload
 * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq
 * @hw_fence_client_queue_size: size of the client queue for the payload
 * @hw_fence_mem_clients_queues_size: total size of client queues, including: header + rxq + txq
 * @hw_fences_tbl: pointer to the hw-fences table
 * @hw_fences_tbl_cnt: number of elements in the hw-fence table
 * @client_lock_tbl: pointer to the per-client locks table
 * @client_lock_tbl_cnt: number of elements in the locks table
 * @hw_fences_mem_desc: memory descriptor for the hw-fence table
 * @clients_locks_mem_desc: memory descriptor for the locks table
 * @ctrl_queue_mem_desc: memory descriptor for the ctrl queues
 * @ctrl_queues: pointer to the ctrl queues
 * @io_mem_base: pointer to the carved-out io memory
 * @res: resources for the carved out memory
 * @size: size of the carved-out memory
 * @label: label for the carved-out memory (this is used by SVM to find the memory)
 * @peer_name: peer name for this carved-out memory
 * @rm_nb: hyp resource manager notifier
 * @memparcel: memparcel for the allocated memory
 * @db_label: doorbell label
 * @rx_dbl: handle to the Rx doorbell
 * @debugfs_data: debugfs info
 * @ipcc_reg_base: base for ipcc regs mapping
 * @ipcc_io_mem: base for the ipcc io mem map
 * @ipcc_size: size of the ipcc io mem mapping
 * @protocol_id: ipcc protocol id used by this driver
 * @ipcc_client_id: ipcc client id for this driver
 * @ipc_clients_table: table with the ipcc mapping for each client of this driver
 * @qtime_reg_base: qtimer register base address
 * @qtime_io_mem: qtimer io mem map
 * @qtime_size: qtimer io mem map size
 * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc)
 * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc)
 * @client_id_mask: bitmask for tracking registered client_ids
 * @clients_mask_lock: lock to synchronize access to the clients mask
 * @clients: table with the handles of the registered clients
 * @vm_ready: flag to indicate if vm has been initialized
 * @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized
 */
struct hw_fence_driver_data {

	struct device *dev;
	bool resources_ready;

	/* Table & Queues info */
	u32 hw_fence_table_entries;
	u32 hw_fence_mem_fences_table_size;
	u32 hw_fence_queue_entries;
	/* ctrl queues */
	u32 hw_fence_ctrl_queue_size;
	u32 hw_fence_mem_ctrl_queues_size;
	/* client queues */
	u32 hw_fence_client_queue_size;
	u32 hw_fence_mem_clients_queues_size;

	/* HW Fences Table VA */
	struct msm_hw_fence *hw_fences_tbl;
	u32 hw_fences_tbl_cnt;

	/* Table with a Per-Client Lock */
	u64 *client_lock_tbl;
	u32 client_lock_tbl_cnt;

	/* Memory Descriptors */
	struct msm_hw_fence_mem_addr hw_fences_mem_desc;
	struct msm_hw_fence_mem_addr clients_locks_mem_desc;
	struct msm_hw_fence_mem_addr ctrl_queue_mem_desc;
	struct msm_hw_fence_queue ctrl_queues[HW_FENCE_CTRL_QUEUES];

	/* carved out memory */
	void __iomem *io_mem_base;
	struct resource res;
	size_t size;
	u32 label;
	u32 peer_name;
	struct notifier_block rm_nb;
	u32 memparcel;

	/* doorbell */
	u32 db_label;

	/* VM virq */
	void *rx_dbl;

	/* debugfs */
	struct msm_hw_fence_dbg_data debugfs_data;

	/* ipcc regs */
	phys_addr_t ipcc_reg_base;
	void __iomem *ipcc_io_mem;
	uint32_t ipcc_size;
	u32 protocol_id;
	u32 ipcc_client_id;

	/* table with mapping of ipc client for each hw-fence client */
	struct hw_fence_client_ipc_map *ipc_clients_table;

	/* qtime reg */
	phys_addr_t qtime_reg_base;
	void __iomem *qtime_io_mem;
	uint32_t qtime_size;

	/* base address for dpu ctl start regs */
	void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS];
	uint32_t ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS];

	/* bitmask for tracking registered client_ids */
	u64 client_id_mask;
	struct mutex clients_mask_lock;

	/* table with registered client handles */
	struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX];

	bool vm_ready;
#ifdef HW_DPU_IPCC
	/* state variables */
	bool ipcc_dpu_initialized;
#endif /* HW_DPU_IPCC */
};
/**
 * struct msm_hw_fence_queue_payload - hardware fence clients queues payload.
 * @ctxt_id: context id of the dma fence
 * @seqno: sequence number of the dma fence
 * @hash: fence hash
 * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions
 * @error: error code for this fence, fence controller receives this
 *         error from the signaling client through the tx queue and
 *         propagates the error to the waiting client through rx queue
 * @timestamp: qtime when the payload is written into the queue
 */
struct msm_hw_fence_queue_payload {
	u64 ctxt_id;
	u64 seqno;
	u64 hash;
	u64 flags;
	u32 error;
	u32 timestamp;
};

/**
 * struct msm_hw_fence - structure holding each hw fence data.
 * @valid: field updated when a hw-fence is reserved. True if hw-fence is in use
 * @error: field to hold a hw-fence error
 * @ctx_id: context id
 * @seq_id: sequence id
 * @wait_client_mask: bitmask holding the waiting-clients of the fence
 * @fence_allocator: field to indicate the client_id that reserved the fence
 * @fence_signal_client: field to indicate the client_id that signaled the fence
 * @lock: this field is required to share information between the Driver & Driver ||
 *        Driver & FenceCTL. Needs to be 64-bit atomic inter-processor lock.
 * @flags: field to indicate the state of the fence
 * @parent_list: list of indexes with the parents for a child-fence in a join-fence
 * @parents_cnt: total number of parents for a child-fence in a join-fence
 * @pending_child_cnt: children refcount for a parent-fence in a join-fence. Access must be atomic
 *                     or locked
 * @fence_create_time: debug info with the create time timestamp
 * @fence_trigger_time: debug info with the trigger time timestamp
 * @fence_wait_time: debug info with the register-for-wait timestamp
 * @debug_refcount: refcount used for debugging
 */
struct msm_hw_fence {
	u32 valid;
	u32 error;
	u64 ctx_id;
	u64 seq_id;
	u64 wait_client_mask;
	u32 fence_allocator;
	u32 fence_signal_client;
	u64 lock; /* Datatype must be 64-bit. */
	u64 flags;
	u64 parent_list[MSM_HW_FENCE_MAX_JOIN_PARENTS];
	u32 parents_cnt;
	u32 pending_child_cnt;
	u64 fence_create_time;
	u64 fence_trigger_time;
	u64 fence_wait_time;
	u64 debug_refcount;
};
/* Initialize the driver: carve-out memory, tables, queues and ipcc mapping. */
int hw_fence_init(struct hw_fence_driver_data *drv_data);

/* Allocate the per-client queues and fill the client memory descriptor. */
int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct msm_hw_fence_mem_addr *mem_descriptor);

/* Set up the fence-controller ipc signal for the given client. */
int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client);

/* Set up the fence-controller resources for the given client. */
int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client);

/* Release the resources allocated for a client at registration time. */
void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client);

/* Reserve a hw-fence in the global table; returns its hash through @hash. */
int hw_fence_create(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno, u64 *hash);

/* Remove a hw-fence from the global table. */
int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno);

/* Register the client as waiter on every fence of a dma_fence_array. */
int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct dma_fence_array *array);

/* Register the client as waiter on a single dma_fence. */
int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence);

/* Write a payload for the given fence into the client's tx/rx queue. */
int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
	u64 flags, u32 error, int queue_type);

/* Read the current qtimer value.
 * NOTE(review): 'inline' on a prototype with no body in this header is
 * unusual -- confirm the definition site carries the matching qualifier.
 */
inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data);

/* Pop the next payload from the client's queue of the given type. */
int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
	struct msm_hw_fence_queue_payload *payload, int queue_type);

/* Register the client as a waiter for the (context, seqno) fence. */
int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, u64 context, u64 seqno);

/* Look up a hw-fence in the global table; returns its hash through @hash. */
struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno, u64 *hash);
#endif /* __HW_FENCE_DRV_INTERNAL_H */

Datei anzeigen

@@ -0,0 +1,113 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_UTILS_H
#define __HW_FENCE_DRV_UTILS_H

/**
 * enum hw_fence_mem_reserve - Types of reservations for the carved-out memory.
 * HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues.
 * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region.
 * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table.
 * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues.
 */
enum hw_fence_mem_reserve {
	HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
	HW_FENCE_MEM_RESERVE_LOCKS_REGION,
	HW_FENCE_MEM_RESERVE_TABLE,
	HW_FENCE_MEM_RESERVE_CLIENT_QUEUE
};

/**
 * global_atomic_store() - Inter-processor lock
 * @lock: memory to lock
 * @val: if true, api locks the memory, if false it unlocks the memory
 */
void global_atomic_store(uint64_t *lock, bool val);

/**
 * hw_fence_utils_init_virq() - Initialize doorbell (i.e. vIRQ) for SVM to HLOS signaling
 * @drv_data: hw fence driver data
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data);

/**
 * hw_fence_utils_process_doorbell_mask() - Sends doorbell mask to process the signaled clients
 *                                          this API is only exported for simulation purposes.
 * @drv_data: hw fence driver data.
 * @db_flags: doorbell flag
 */
void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags);

/**
 * hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW
 *                              Fence global table, locks and queues.
 * @hw_fence_drv_data: hw fence driver data
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *hw_fence_drv_data);

/**
 * hw_fence_utils_reserve_mem() - Reserves memory from the carved-out memory pool.
 * @drv_data: hw fence driver data.
 * @type: memory reservation type.
 * @phys: physical address of the carved-out memory pool
 * @pa: returned virtual address of the reserved region
 * @size: returned size of the reserved region
 * @client_id: client for which the region is reserved (client-queue type only)
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id);

/**
 * hw_fence_utils_parse_dt_props() - Init dt properties
 * @drv_data: hw fence driver data
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data);

/**
 * hw_fence_utils_map_ipcc() - Maps IPCC registers and enable signaling
 * @drv_data: hw fence driver data
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data);

/**
 * hw_fence_utils_map_qtime() - Maps qtime register
 * @drv_data: hw fence driver data
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data);

/**
 * hw_fence_utils_map_ctl_start() - Maps ctl_start registers from dpu hw
 * @drv_data: hw fence driver data
 *
 * Returns zero if success, otherwise returns negative error code. This API is only used
 * for simulation purposes in platforms where dpu does not support ipc signal.
 */
int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client
* @drv_data: hw fence driver data
* @hw_fence_client: client, for which the fence must be cleared
* @hw_fence: hw-fence to cleanup
* @hash: hash of the hw-fence to cleanup
* @reset_flags: flags to determine how to handle the reset
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
u32 reset_flags);
#endif /* __HW_FENCE_DRV_UTILS_H */

Datei-Diff unterdrückt, da er zu groß ist Diff laden

Datei anzeigen

@@ -0,0 +1,265 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/**
 * struct hw_fence_client_ipc_map - map client id with ipc signal for trigger.
 * @ipc_client_id: ipc client id for the hw-fence client.
 * @ipc_signal_id: ipc signal id for the hw-fence client.
 * @update_rxq: bool to indicate if client uses rx-queue.
 */
struct hw_fence_client_ipc_map {
	int ipc_client_id;
	int ipc_signal_id;
	bool update_rxq;
};
/**
 * struct hw_fence_clients_ipc_map_no_dpu - Table makes the 'client to signal' mapping, which
 *		is used by the hw fence driver to trigger ipc signal when the hw fence is already
 *		signaled.
 *		This no_dpu version is for targets that do not support dpu client id
 *
 * Notes:
 * The index of this struct must match the enum hw_fence_client_id.
 * To change to a loopback signal instead of GMU, change ctx0 row to use:
 *   {HW_FENCE_IPC_CLIENT_ID_APPS, 20}.
 * The val0..val6 rows exist only on debugfs-enabled (validation) builds.
 */
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = {
	{HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */
	{HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 14, false}, /* ctl0 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 15, false}, /* ctl1 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 16, false}, /* ctl2 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 17, false}, /* ctl3 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 18, false}, /* ctl4 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 19, false}, /* ctl5 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
	{HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
};
/**
 * struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is
 *		used by the hw fence driver to trigger ipc signal when hw fence is already
 *		signaled.
 *		This version is for targets that support dpu client id.
 *
 * Note that the index of this struct must match the enum hw_fence_client_id.
 * The ctl0..ctl5 rows route through the dedicated DPU ipc client; the
 * val0..val6 rows exist only on debugfs-enabled (validation) builds.
 */
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = {
	{HW_FENCE_IPC_CLIENT_ID_APPS, 1, true}, /* ctrl queue loopback */
	{HW_FENCE_IPC_CLIENT_ID_GPU, 0, true}, /* ctx0 */
	{HW_FENCE_IPC_CLIENT_ID_DPU, 0, false}, /* ctl0 */
	{HW_FENCE_IPC_CLIENT_ID_DPU, 1, false}, /* ctl1 */
	{HW_FENCE_IPC_CLIENT_ID_DPU, 2, false}, /* ctl2 */
	{HW_FENCE_IPC_CLIENT_ID_DPU, 3, false}, /* ctl3 */
	{HW_FENCE_IPC_CLIENT_ID_DPU, 4, false}, /* ctl4 */
	{HW_FENCE_IPC_CLIENT_ID_DPU, 5, false}, /* ctl5 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
	{HW_FENCE_IPC_CLIENT_ID_APPS, 21, true}, /* val0 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 22, true}, /* val1 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 23, true}, /* val2 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 24, true}, /* val3 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 25, true}, /* val4 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 26, true}, /* val5 */
	{HW_FENCE_IPC_CLIENT_ID_APPS, 27, true}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
};
/**
 * hw_fence_ipcc_get_client_id() - Look up the ipcc client id mapped to a hw-fence client.
 * @drv_data: hw fence driver data.
 * @client_id: hw-fence client id (index into the mapping table).
 *
 * Returns the ipc client id, or -EINVAL for invalid arguments.
 */
int hw_fence_ipcc_get_client_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
	struct hw_fence_client_ipc_map *entry;

	if (!drv_data)
		return -EINVAL;
	if (client_id >= HW_FENCE_CLIENT_MAX)
		return -EINVAL;

	entry = &drv_data->ipc_clients_table[client_id];

	return entry->ipc_client_id;
}
/**
 * hw_fence_ipcc_get_signal_id() - Look up the ipcc signal id mapped to a hw-fence client.
 * @drv_data: hw fence driver data.
 * @client_id: hw-fence client id (index into the mapping table).
 *
 * Returns the ipc signal id, or -EINVAL for invalid arguments.
 */
int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
	bool args_valid = drv_data && (client_id < HW_FENCE_CLIENT_MAX);

	return args_valid ? drv_data->ipc_clients_table[client_id].ipc_signal_id : -EINVAL;
}
bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].update_rxq;
}
/**
 * _get_ipc_client_name() - Returns ipc client name, used for debugging.
 * @client_id: ipc client id (HW_FENCE_IPC_CLIENT_ID_*).
 */
static inline char *_get_ipc_client_name(u32 client_id)
{
	if (client_id == HW_FENCE_IPC_CLIENT_ID_APPS)
		return "APPS";
	if (client_id == HW_FENCE_IPC_CLIENT_ID_GPU)
		return "GPU";
	if (client_id == HW_FENCE_IPC_CLIENT_ID_DPU)
		return "DPU";

	return "UNKNOWN";
}
/**
 * hw_fence_ipcc_trigger_signal() - Trigger an ipcc signal from tx client to rx client.
 * @drv_data: hw fence driver data.
 * @tx_client_id: ipc client id that sends the signal.
 * @rx_client_id: ipc client id that receives the signal.
 * @signal_id: signal id to trigger.
 *
 * Writes the destination/signal word into the tx client's SEND register for
 * the configured protocol, then orders the MMIO write with wmb().
 */
void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
	u32 tx_client_id, u32 rx_client_id, u32 signal_id)
{
	void __iomem *ptr;
	u32 val;

	/* Send signal */
	ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id,
		tx_client_id);

	/* SEND word encoding: destination client in bits [31:16], signal id in [15:0] */
	val = (rx_client_id << 16) | signal_id;

	HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n",
		_get_ipc_client_name(tx_client_id), tx_client_id,
		_get_ipc_client_name(rx_client_id), rx_client_id,
		signal_id, val, ptr);

	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
	writel_relaxed(val, ptr);

	/* Make sure value is written */
	wmb();
}
/**
 * _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data,
 *		according to the ipcc hw revision.
 * @drv_data: driver data.
 * @hwrev: ipcc hw revision.
 *
 * Returns zero if success, otherwise -EINVAL for an unsupported revision.
 */
static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev)
{
	switch (hwrev) {
	case HW_FENCE_IPCC_HW_REV_100:
		/* Lahaina: no dedicated dpu ipc client, use the no_dpu mapping table */
		drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS;
		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA;
		drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu;
		HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n");
		break;
	case HW_FENCE_IPCC_HW_REV_110:
		/* Waipio: no dedicated dpu ipc client, use the no_dpu mapping table */
		drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS;
		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO;
		drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu;
		HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n");
		break;
	case HW_FENCE_IPCC_HW_REV_170:
		/* Kailua: dpu ipc client supported, use the full mapping table */
		drv_data->ipcc_client_id = HW_FENCE_IPC_CLIENT_ID_APPS;
		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KAILUA;
		drv_data->ipc_clients_table = hw_fence_clients_ipc_map;
		HWFNC_DBG_INIT("ipcc protocol_id: Kailua\n");
		break;
	default:
		/* was 'return -1'; return a proper errno, consistent with the rest of the driver */
		return -EINVAL;
	}

	return 0;
}
/**
 * hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for the apps client.
 * @drv_data: hw fence driver data.
 *
 * Reads the ipcc hw revision, selects the matching protocol id and client
 * mapping table, then enables the compute-l1 protocol and the APPS->APPS
 * client-signal pair.
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
{
	void __iomem *ptr;
	u32 val;

	HWFNC_DBG_H("enable ipc +\n");

	/*
	 * Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1).
	 * NOTE(review): the Lahaina protocol id is used for this read before
	 * the actual hw revision is known — confirm the VERSION register
	 * layout is identical across all supported targets.
	 */
	val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem,
		HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA, HW_FENCE_IPC_CLIENT_ID_APPS));
	HWFNC_DBG_INIT("ipcc version:0x%x\n", val);

	if (_hw_fence_ipcc_hwrev_init(drv_data, val)) {
		HWFNC_ERR("ipcc protocol id not supported\n");
		return -EINVAL;
	}

	/* Enable compute l1 (protocol_id = 2) */
	val = 0x00000000;
	ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id,
		HW_FENCE_IPC_CLIENT_ID_APPS);
	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
	writel_relaxed(val, ptr);

	/*
	 * Enable Client-Signal pairs from APPS(NS) (0x8) to APPS(NS) (0x8).
	 * 0x000080000 == 0x80000; appears to mirror the SEND encoding
	 * (client 0x8 in bits [31:16], signal 0 in [15:0]).
	 */
	val = 0x000080000;
	ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id,
		HW_FENCE_IPC_CLIENT_ID_APPS);
	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
	writel_relaxed(val, ptr);

	HWFNC_DBG_H("enable ipc -\n");

	return 0;
}
#ifdef HW_DPU_IPCC
/**
 * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu clients.
 * @drv_data: hw fence driver data.
 *
 * Enables the compute-l1 protocol for the DPU ipc client and the DPU->APPS
 * client-signal pair for every hw-fence client mapped to the DPU.
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data)
{
	struct hw_fence_client_ipc_map *hw_fence_client;
	void __iomem *ptr;
	u32 val;
	int i;

	HWFNC_DBG_H("enable dpu ipc +\n");

	if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table) {
		HWFNC_ERR("invalid drv data\n");
		return -EINVAL;	/* was 'return -1'; use a proper errno like the rest of the driver */
	}

	HWFNC_DBG_H("ipcc_io_mem:0x%lx\n", (u64)drv_data->ipcc_io_mem);

	/*
	 * Enable compute l1 (protocol_id = 2) for dpu (25)
	 * Sets bit(1) to clear when RECV_ID is read
	 */
	val = 0x00000001;
	ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id,
		HW_FENCE_IPC_CLIENT_ID_DPU);
	HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
	writel_relaxed(val, ptr);

	HWFNC_DBG_H("Initialize dpu signals\n");

	/* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */
	for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) {
		hw_fence_client = &drv_data->ipc_clients_table[i];

		/* skip any client that is not a dpu client */
		if (hw_fence_client->ipc_client_id != HW_FENCE_IPC_CLIENT_ID_DPU)
			continue;

		/* Enable signals for dpu client */
		HWFNC_DBG_H("dpu:%d client:%d signal:%d\n", hw_fence_client->ipc_client_id, i,
			hw_fence_client->ipc_signal_id);
		val = 0x000080000 | (hw_fence_client->ipc_signal_id & 0xFFFF);
		ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem,
			drv_data->protocol_id, HW_FENCE_IPC_CLIENT_ID_DPU);
		HWFNC_DBG_H("Write:0x%x to RegOffset:0x%lx\n", val, (u64)ptr);
		writel_relaxed(val, ptr);
	}

	HWFNC_DBG_H("enable dpu ipc -\n");

	return 0;
}
#endif /* HW_DPU_IPCC */

Datei-Diff unterdrückt, da er zu groß ist Diff laden

Datei anzeigen

@@ -0,0 +1,657 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_dbl.h>
#include <soc/qcom/secure_buffer.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/**
 * _lock() - Acquire the inter-processor lock at @wait.
 * @wait: pointer to the shared lock word.
 *
 * aarch64 only: sleeps with WFE between retries, loads the lock word with a
 * load-acquire exclusive (LDAXR) until it reads zero, then attempts to claim
 * it with a store-exclusive (STXR), restarting on any failure.
 * NOTE(review): W5 is used as scratch but is not in the clobber list, and W0
 * is stored as the "locked" value without an input operand tying it to
 * anything — fragile inline asm; consider explicit operands.
 * On non-aarch64 builds this function is a no-op.
 */
static void _lock(uint64_t *wait)
{
	/* WFE Wait */
#if defined(__aarch64__)
	__asm__("SEVL\n\t"
		"PRFM PSTL1KEEP, [%x[i_lock]]\n\t"
		"1:\n\t"
		"WFE\n\t"
		"LDAXR W5, [%x[i_lock]]\n\t"
		"CBNZ W5, 1b\n\t"
		"STXR W5, W0, [%x[i_lock]]\n\t"
		"CBNZ W5, 1b\n"
		:
		: [i_lock] "r" (wait)
		: "memory");
#endif
}
/**
 * _unlock() - Release the inter-processor lock at @lock.
 * @lock: pointer to the shared lock word.
 *
 * aarch64 only: clears the lock word with a store-release (STLR WZR) and
 * issues SEV to wake any waiter sleeping in _lock()'s WFE loop.
 * On non-aarch64 builds this function is a no-op.
 */
static void _unlock(uint64_t *lock)
{
	/* Signal Client */
#if defined(__aarch64__)
	__asm__("STLR WZR, [%x[i_out]]\n\t"
		"SEV\n"
		:
		: [i_out] "r" (lock)
		: "memory");
#endif
}
/**
 * global_atomic_store() - Inter-processor lock/unlock entry point.
 * @lock: memory word to lock or unlock.
 * @val: true acquires the lock, false releases it.
 */
void global_atomic_store(uint64_t *lock, bool val)
{
	if (!val) {
		_unlock(lock);
		return;
	}

	_lock(lock);
}
/*
 * Each bit in this mask represents each of the loopback clients supported in
 * the enum hw_fence_loopback_id
 */
#define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7f

/**
 * _process_dpu_client_loopback() - Trigger the dpu ctl-start path for a signaled loopback client.
 * @drv_data: hw fence driver data.
 * @client_id: loopback client id; maps 1:1 to the dpu ctl path id.
 *
 * Returns zero if success, otherwise returns negative error code.
 */
static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */
	void *ctl_start_reg;
	u32 val;

	if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) {
		HWFNC_ERR("invalid ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	ctl_start_reg = drv_data->ctl_start_ptr[ctl_id];
	if (!ctl_start_reg) {
		HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id);

	val = 0x1; /* ctl_start trigger */
#ifdef CTL_START_SIM
	/*
	 * Bug fix: arguments were passed as (ctl_start_reg, val, ctl_id),
	 * mismatching the "%d ... %pK ... %x" format string (and the working
	 * branch below). Reordered to (ctl_id, ctl_start_reg, val).
	 */
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_id,
		ctl_start_reg, val);
	writel_relaxed(val, ctl_start_reg);
#else
	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id,
		ctl_start_reg, val);
#endif

	return 0;
}
/**
 * _process_gfx_client_loopback() - Drain the GFX client rx-queue for a loopback signal.
 * @drv_data: hw fence driver data.
 * @client_id: loopback client id (used for logging only).
 *
 * Returns the last value returned by hw_fence_read_queue(): 0 once the queue
 * is drained, negative on a read error.
 */
static inline int _process_gfx_client_loopback(struct hw_fence_driver_data *drv_data,
	int client_id)
{
	int rx_queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */
	struct msm_hw_fence_queue_payload payload;
	int ret;

	HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id);
	do {
		/*
		 * 'client_id' is the loopback-client-id, not the hw-fence client_id,
		 * so use GFX hw-fence client id, to get the client data
		 */
		ret = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload,
			rx_queue_type);
		if (ret < 0) {
			HWFNC_ERR("unable to read gfx rxq\n");
			break;
		}
		HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n",
			payload.hash, payload.ctxt_id, payload.seqno, payload.flags, payload.error);
	} while (ret);

	return ret;
}
/**
 * _process_doorbell_client() - Dispatch one signaled loopback client to its handler.
 * @drv_data: hw fence driver data.
 * @client_id: loopback client id (enum hw_fence_loopback_id), not a hw-fence client id.
 *
 * Returns zero if success, otherwise returns negative error code.
 */
static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id)
{
	int ret;

	HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id);
	switch (client_id) {
	case HW_FENCE_LOOPBACK_DPU_CTL_0:
	case HW_FENCE_LOOPBACK_DPU_CTL_1:
	case HW_FENCE_LOOPBACK_DPU_CTL_2:
	case HW_FENCE_LOOPBACK_DPU_CTL_3:
	case HW_FENCE_LOOPBACK_DPU_CTL_4:
	case HW_FENCE_LOOPBACK_DPU_CTL_5:
		ret = _process_dpu_client_loopback(drv_data, client_id);
		break;
	case HW_FENCE_LOOPBACK_GFX_CTX_0:
		ret = _process_gfx_client_loopback(drv_data, client_id);
		break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	/* validation clients exist only on debugfs-enabled builds */
	case HW_FENCE_LOOPBACK_VAL_0:
	case HW_FENCE_LOOPBACK_VAL_1:
	case HW_FENCE_LOOPBACK_VAL_2:
	case HW_FENCE_LOOPBACK_VAL_3:
	case HW_FENCE_LOOPBACK_VAL_4:
	case HW_FENCE_LOOPBACK_VAL_5:
	case HW_FENCE_LOOPBACK_VAL_6:
		ret = process_validation_client_loopback(drv_data, client_id);
		break;
#endif /* CONFIG_DEBUG_FS */
	default:
		HWFNC_ERR("unknown client:%d\n", client_id);
		ret = -EINVAL;
	}

	return ret;
}
/**
 * hw_fence_utils_process_doorbell_mask() - Process every loopback client set in @db_flags.
 * @drv_data: hw fence driver data.
 * @db_flags: doorbell flags; one bit per loopback client id.
 *
 * Iterates the loopback client ids, dispatches each signaled client, and
 * stops early once all flagged bits have been handled.
 */
void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags)
{
	int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0;
	u64 mask;

	for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) {
		/*
		 * Bug fix (latent): '1 << client_id' is an int shift, which is
		 * undefined for client_id >= 31 and would sign-extend when
		 * widened to the u64 mask; use an unsigned 64-bit shift.
		 */
		mask = 1ULL << client_id;
		if (mask & db_flags) {
			HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags);

			/* process client */
			if (_process_doorbell_client(drv_data, client_id))
				HWFNC_ERR("Failed to process client:%d\n", client_id);

			/* clear mask for this client and if nothing else pending finish */
			db_flags = db_flags & ~(mask);
			HWFNC_DBG_H("client_id:%d cleared flags:0x%llx mask:0x%llx ~mask:0x%llx\n",
				client_id, db_flags, mask, ~(mask));
			if (!db_flags)
				break;
		}
	}
}
/* doorbell callback */
/**
 * _hw_fence_cb() - Gunyah doorbell callback for the hw-fence driver.
 * @irq: doorbell irq number (logging only).
 * @data: struct hw_fence_driver_data registered with gh_dbl_rx_register().
 *
 * Reads-and-clears the pending doorbell flags (masked to the supported
 * loopback clients) and processes each signaled client.
 */
static void _hw_fence_cb(int irq, void *data)
{
	struct hw_fence_driver_data *drv_data = (struct hw_fence_driver_data *)data;
	gh_dbl_flags_t clear_flags = HW_FENCE_LOOPBACK_CLIENTS_MASK;
	int ret;

	if (!drv_data)
		return;

	ret = gh_dbl_read_and_clean(drv_data->rx_dbl, &clear_flags, 0);
	if (ret) {
		HWFNC_ERR("hw_fence db callback, retrieve flags fail ret:%d\n", ret);
		return;
	}

	HWFNC_DBG_IRQ("db callback label:%d irq:%d flags:0x%llx qtime:%llu\n", drv_data->db_label,
		irq, clear_flags, hw_fence_get_qtime(drv_data));

	hw_fence_utils_process_doorbell_mask(drv_data, clear_flags);
}
int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data)
{
struct device_node *node = drv_data->dev->of_node;
struct device_node *node_compat;
const char *compat = "qcom,msm-hw-fence-db";
int ret;
node_compat = of_find_compatible_node(node, NULL, compat);
if (!node_compat) {
HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
return -EINVAL;
}
ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->db_label);
if (ret) {
HWFNC_ERR("failed to find label info %d\n", ret);
return ret;
}
HWFNC_DBG_IRQ("registering doorbell db_label:%d\n", drv_data->db_label);
drv_data->rx_dbl = gh_dbl_rx_register(drv_data->db_label, _hw_fence_cb, drv_data);
if (IS_ERR_OR_NULL(drv_data->rx_dbl)) {
ret = PTR_ERR(drv_data->rx_dbl);
HWFNC_ERR("Failed to register doorbell\n");
return ret;
}
return 0;
}
/**
 * hw_fence_gunyah_share_mem() - Share the carved-out memory region with the peer VM.
 * @drv_data: hw fence driver data.
 * @self: vmid of the local (primary) VM.
 * @peer: vmid of the peer VM.
 *
 * Re-assigns the carved-out region to both VMs via hyp_assign_phys() and then
 * shares it through gh_rm_mem_share(). If the share fails, the region is
 * given back to HLOS and -EPROBE_DEFER is returned so probe can retry later.
 *
 * Returns zero if success, otherwise returns negative error code.
 */
static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
	gh_vmid_t self, gh_vmid_t peer)
{
	u32 src_vmlist[1] = {self};
	/* NOTE(review): declared with 2 slots but only one perm entry is initialized and used */
	int src_perms[2] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int dst_vmlist[2] = {self, peer};
	int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret;

	ret = hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res),
		src_vmlist, 1, dst_vmlist, dst_perms, 2);
	if (ret) {
		HWFNC_ERR("%s: hyp_assign_phys failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		return ret;
	}

	/* ACL: both VMs get read/write access */
	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	/* single-entry SG list covering the whole carved-out region */
	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}
	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = drv_data->res.start;
	sgl->sgl_entries[0].size = resource_size(&drv_data->res);

	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
		acl, sgl, NULL, &drv_data->memparcel);
	if (ret) {
		HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n",
			__func__, drv_data->res.start, drv_data->size, ret);
		/* Attempt to give resource back to HLOS */
		hyp_assign_phys(drv_data->res.start, resource_size(&drv_data->res),
			dst_vmlist, 2,
			src_vmlist, src_perms, 1);
		/* defer probe so the share can be retried once the RM is ready */
		ret = -EPROBE_DEFER;
	}

	kfree(acl);
	kfree(sgl);

	return ret;
}
/**
 * hw_fence_rm_cb() - Gunyah resource-manager notifier callback.
 * @nb: notifier block embedded in hw_fence_driver_data.
 * @cmd: notification command.
 * @data: notification payload (struct gh_rm_notif_vm_status_payload for VM status).
 *
 * On GH_RM_VM_STATUS_READY for the configured peer VM, shares the carved-out
 * memory with the peer and marks the VM ready. Always returns NOTIFY_DONE.
 */
static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct gh_rm_notif_vm_status_payload *vm_status_payload;
	struct hw_fence_driver_data *drv_data;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;

	drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb);

	HWFNC_DBG_INIT("cmd:0x%lx ++\n", cmd);
	if (cmd != GH_RM_NOTIF_VM_STATUS)
		goto end;

	vm_status_payload = data;
	HWFNC_DBG_INIT("payload vm_status:%d\n", vm_status_payload->vm_status);
	/* only READY and RESET transitions are of interest */
	if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY &&
		vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
		goto end;

	/* ignore notifications about any VM other than the configured peer */
	if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid))
		goto end;
	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		goto end;
	if (peer_vmid != vm_status_payload->vmid)
		goto end;

	switch (vm_status_payload->vm_status) {
	case GH_RM_VM_STATUS_READY:
		HWFNC_DBG_INIT("init mem\n");
		if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
			HWFNC_ERR("failed to share memory\n");
		else
			drv_data->vm_ready = true;
		break;
	case GH_RM_VM_STATUS_RESET:
		/* NOTE(review): reset is only logged; shared memory is not reclaimed here */
		HWFNC_DBG_INIT("reset\n");
		break;
	}

end:
	return NOTIFY_DONE;
}
/* Allocates carved-out mapped memory */
/**
 * hw_fence_utils_alloc_mem() - Map the carved-out shared memory and register with HYP.
 * @drv_data: hw fence driver data.
 *
 * Finds the "qcom,msm-hw-fence-mem" dt node, ioremaps its shared-buffer
 * region, zeroes it, and registers a gunyah resource-manager notifier
 * (hw_fence_rm_cb) that shares the memory once the peer VM is ready.
 *
 * Returns zero if success, otherwise returns negative error code
 * (-EPROBE_DEFER if the notifier cannot be registered yet).
 */
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data)
{
	struct device_node *node = drv_data->dev->of_node;
	struct device_node *node_compat;
	const char *compat = "qcom,msm-hw-fence-mem";
	struct device *dev = drv_data->dev;
	struct device_node *np;
	int notifier_ret, ret;

	node_compat = of_find_compatible_node(node, NULL, compat);
	if (!node_compat) {
		HWFNC_ERR("Failed to find dev node with compat:%s\n", compat);
		return -EINVAL;
	}

	ret = of_property_read_u32(node_compat, "gunyah-label", &drv_data->label);
	if (ret) {
		HWFNC_ERR("failed to find label info %d\n", ret);
		return ret;
	}

	np = of_parse_phandle(node_compat, "shared-buffer", 0);
	if (!np) {
		HWFNC_ERR("failed to read shared-buffer info\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(np, 0, &drv_data->res);
	of_node_put(np);
	if (ret) {
		HWFNC_ERR("of_address_to_resource failed %d\n", ret);
		return -EINVAL;
	}

	drv_data->io_mem_base = devm_ioremap(dev, drv_data->res.start,
		resource_size(&drv_data->res));
	if (!drv_data->io_mem_base) {
		HWFNC_ERR("ioremap failed!\n");
		return -ENXIO;
	}
	drv_data->size = resource_size(&drv_data->res);

	/* NOTE(review): pointer/resource values printed with 0x%x are truncated on 64-bit;
	 * consider %pK / %pa specifiers.
	 */
	HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n",
		drv_data->io_mem_base, drv_data->res.start,
		drv_data->res.end, drv_data->size, drv_data->res.name);

	memset_io(drv_data->io_mem_base, 0x0, drv_data->size);

	/* Register memory with HYP */
	ret = of_property_read_u32(node_compat, "peer-name", &drv_data->peer_name);
	if (ret)
		drv_data->peer_name = GH_SELF_VM; /* optional property; default to self VM */

	drv_data->rm_nb.notifier_call = hw_fence_rm_cb;
	drv_data->rm_nb.priority = INT_MAX;
	notifier_ret = gh_rm_register_notifier(&drv_data->rm_nb);
	HWFNC_DBG_INIT("notifier: ret:%d peer_name:%d notifier_ret:%d\n", ret,
		drv_data->peer_name, notifier_ret);
	if (notifier_ret) {
		HWFNC_ERR("fail to register notifier ret:%d\n", notifier_ret);
		return -EPROBE_DEFER;
	}

	return 0;
}
/**
 * _get_mem_reserve_type() - Returns a printable name for a memory reservation type.
 * @type: reservation type to name.
 */
char *_get_mem_reserve_type(enum hw_fence_mem_reserve type)
{
	if (type == HW_FENCE_MEM_RESERVE_CTRL_QUEUE)
		return "HW_FENCE_MEM_RESERVE_CTRL_QUEUE";
	if (type == HW_FENCE_MEM_RESERVE_LOCKS_REGION)
		return "HW_FENCE_MEM_RESERVE_LOCKS_REGION";
	if (type == HW_FENCE_MEM_RESERVE_TABLE)
		return "HW_FENCE_MEM_RESERVE_TABLE";
	if (type == HW_FENCE_MEM_RESERVE_CLIENT_QUEUE)
		return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE";

	return "Unknown";
}
/* Calculates the memory range for each of the elements in the carved-out memory */
/**
 * hw_fence_utils_reserve_mem() - Reserve a sub-region of the carved-out memory pool.
 * @drv_data: hw fence driver data.
 * @type: memory reservation type.
 * @phys: returns the physical address of the reserved region.
 * @pa: returns the mapped address of the reserved region.
 * @size: returns the size in bytes of the reserved region.
 * @client_id: hw-fence client id; only used for HW_FENCE_MEM_RESERVE_CLIENT_QUEUE.
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id)
{
	int ret = 0;
	u32 start_offset = 0;

	switch (type) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		start_offset = 0;
		*size = drv_data->hw_fence_mem_ctrl_queues_size;
		break;
	case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
		/* Locks region starts at the end of the ctrl queues */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size;
		*size = HW_FENCE_MEM_LOCKS_SIZE;
		break;
	case HW_FENCE_MEM_RESERVE_TABLE:
		/* HW Fence table starts at the end of the Locks region */
		start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE;
		*size = drv_data->hw_fence_mem_fences_table_size;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		if (client_id >= HW_FENCE_CLIENT_MAX) {
			HWFNC_ERR("unexpected client_id:%d\n", client_id);
			ret = -EINVAL;
			goto exit;
		}
		/* client queues start page-aligned after the fence table, one slot per client */
		start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
			HW_FENCE_MEM_LOCKS_SIZE +
			drv_data->hw_fence_mem_fences_table_size) +
			((client_id - 1) * drv_data->hw_fence_mem_clients_queues_size);
		*size = drv_data->hw_fence_mem_clients_queues_size;
		break;
	default:
		HWFNC_ERR("Invalid mem reserve type:%d\n", type);
		ret = -EINVAL;
		/*
		 * Bug fix: this case used to 'break', falling through to the
		 * range check below which read *size before it was ever
		 * written, and then stored garbage into *phys/*pa despite
		 * returning -EINVAL. Jump straight to exit instead.
		 */
		goto exit;
	}

	if (start_offset + *size > drv_data->size) {
		HWFNC_ERR("reservation request:%lu exceeds total size:%d\n",
			start_offset + *size, drv_data->size);
		return -ENOMEM;
	}

	HWFNC_DBG_INIT("type:%s (%d) io_mem_base:0x%x start:0x%x start_offset:%lu size:0x%x\n",
		_get_mem_reserve_type(type), type, drv_data->io_mem_base, drv_data->res.start,
		start_offset, *size);

	*phys = drv_data->res.start + (phys_addr_t)start_offset;
	*pa = (drv_data->io_mem_base + start_offset); /* offset is in bytes */
	HWFNC_DBG_H("phys:0x%x pa:0x%pK\n", *phys, *pa);

exit:
	return ret;
}
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
{
int ret;
u32 val = 0;
ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val);
if (ret || !val) {
HWFNC_ERR("missing hw fences table entry or invalid ret:%d val:%d\n", ret, val);
return ret;
}
drv_data->hw_fence_table_entries = val;
if (drv_data->hw_fence_table_entries >= U32_MAX / sizeof(struct msm_hw_fence)) {
HWFNC_ERR("table entries:%lu will overflow table size\n",
drv_data->hw_fence_table_entries);
return -EINVAL;
}
drv_data->hw_fence_mem_fences_table_size = (sizeof(struct msm_hw_fence) *
drv_data->hw_fence_table_entries);
ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-queue-entries", &val);
if (ret || !val) {
HWFNC_ERR("missing queue entries table entry or invalid ret:%d val:%d\n", ret, val);
return ret;
}
drv_data->hw_fence_queue_entries = val;
/* ctrl queues init */
if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CTRL_QUEUE_PAYLOAD) {
HWFNC_ERR("queue entries:%lu will overflow ctrl queue size\n",
drv_data->hw_fence_queue_entries);
return -EINVAL;
}
drv_data->hw_fence_ctrl_queue_size = HW_FENCE_CTRL_QUEUE_PAYLOAD *
drv_data->hw_fence_queue_entries;
if (drv_data->hw_fence_ctrl_queue_size >= (U32_MAX - HW_FENCE_HFI_CTRL_HEADERS_SIZE) /
HW_FENCE_CTRL_QUEUES) {
HWFNC_ERR("queue size:%lu will overflow ctrl queue mem size\n",
drv_data->hw_fence_ctrl_queue_size);
return -EINVAL;
}
drv_data->hw_fence_mem_ctrl_queues_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE +
(HW_FENCE_CTRL_QUEUES * drv_data->hw_fence_ctrl_queue_size);
/* clients queues init */
if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
HWFNC_ERR("queue entries:%lu will overflow client queue size\n",
drv_data->hw_fence_queue_entries);
return -EINVAL;
}
drv_data->hw_fence_client_queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD *
drv_data->hw_fence_queue_entries;
if (drv_data->hw_fence_client_queue_size >= ((U32_MAX & PAGE_MASK) -
HW_FENCE_HFI_CLIENT_HEADERS_SIZE) / HW_FENCE_CLIENT_QUEUES) {
HWFNC_ERR("queue size:%lu will overflow client queue mem size\n",
drv_data->hw_fence_client_queue_size);
return -EINVAL;
}
drv_data->hw_fence_mem_clients_queues_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE +
(HW_FENCE_CLIENT_QUEUES * drv_data->hw_fence_client_queue_size));
HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b",
drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size,
drv_data->hw_fence_queue_entries);
HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu clients queues: size=%lu mem_size=%lu\b",
drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size,
drv_data->hw_fence_client_queue_size, drv_data->hw_fence_mem_clients_queues_size);
return 0;
}
/**
 * hw_fence_utils_map_ipcc() - Map the IPCC register block and enable signaling.
 * @drv_data: hw fence driver data.
 *
 * Reads the "qcom,ipcc-reg" (base, size) pair from the device tree, ioremaps
 * the block and enables apps-side ipcc signaling.
 *
 * Returns zero if success, otherwise returns negative error code.
 */
int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data)
{
	int ret;
	u32 reg_config[2];
	void __iomem *ptr;

	/* Get ipcc memory range */
	ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,ipcc-reg",
		reg_config, 2);
	if (ret) {
		HWFNC_ERR("failed to read ipcc reg: %d\n", ret);
		return ret;
	}
	drv_data->ipcc_reg_base = reg_config[0];
	drv_data->ipcc_size = reg_config[1];

	/* Mmap ipcc registers */
	ptr = devm_ioremap(drv_data->dev, drv_data->ipcc_reg_base, drv_data->ipcc_size);
	if (!ptr) {
		HWFNC_ERR("failed to ioremap ipcc regs\n");
		return -ENOMEM;
	}
	drv_data->ipcc_io_mem = ptr;

	HWFNC_DBG_H("mapped address:0x%x size:0x%x io_mem:0x%pK\n",
		drv_data->ipcc_reg_base, drv_data->ipcc_size,
		drv_data->ipcc_io_mem);

	/*
	 * Bug fix: the return value of hw_fence_ipcc_enable_signaling() was
	 * previously ignored, so a failure (e.g. unsupported ipcc protocol,
	 * which leaves the client mapping table unset) went unreported.
	 */
	ret = hw_fence_ipcc_enable_signaling(drv_data);

	return ret;
}
int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data)
{
int ret = 0;
unsigned int reg_config[2];
void __iomem *ptr;
ret = of_property_read_u32_array(drv_data->dev->of_node, "qcom,qtime-reg",
reg_config, 2);
if (ret) {
HWFNC_ERR("failed to read qtimer reg: %d\n", ret);
return ret;
}
drv_data->qtime_reg_base = reg_config[0];
drv_data->qtime_size = reg_config[1];
ptr = devm_ioremap(drv_data->dev, drv_data->qtime_reg_base, drv_data->qtime_size);
if (!ptr) {
HWFNC_ERR("failed to ioremap qtime regs\n");
return -ENOMEM;
}
drv_data->qtime_io_mem = ptr;
return ret;
}
/**
 * _map_ctl_start() - Map one dpu ctl-start register block, if declared in dt.
 * @drv_data: hw fence driver data.
 * @ctl_id: dpu ctl path id; selects the "qcom,dpu-ctl-start-<id>-reg" property.
 * @iomem_ptr: returns the mapped address (untouched when the property is absent).
 * @iomem_size: returns the mapped size (untouched when the property is absent).
 *
 * Returns zero if success or if the optional property is absent, otherwise
 * returns negative error code.
 */
static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id,
	void **iomem_ptr, uint32_t *iomem_size)
{
	char name[30] = {0};
	u32 reg_config[2];
	void __iomem *base;

	snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id);
	/* this is an optional property; absence is not an error */
	if (of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2))
		return 0;

	/* Mmap registers */
	base = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]);
	if (!base) {
		HWFNC_ERR("failed to ioremap %s reg\n", name);
		return -ENOMEM;
	}

	*iomem_ptr = base;
	*iomem_size = reg_config[1];

	HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n",
		ctl_id, name, reg_config[0], reg_config[1], base);

	return 0;
}
/**
 * hw_fence_utils_map_ctl_start() - Map every declared dpu ctl-start register block.
 * @drv_data: hw fence driver data.
 *
 * Iterates ctl0..ctl5; mapping failures are logged and skipped, so this
 * always returns zero.
 */
int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data)
{
	u32 ctl_id;

	for (ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5;
			ctl_id++) {
		int ret = _map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id],
			&drv_data->ctl_start_size[ctl_id]);

		if (ret) {
			HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id);
			continue;
		}
		if (drv_data->ctl_start_ptr[ctl_id])
			HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n",
				ctl_id, drv_data->ctl_start_ptr[ctl_id],
				drv_data->ctl_start_size[ctl_id]);
	}

	return 0;
}

711
hw_fence/src/hw_fence_ioctl.c Normale Datei
Datei anzeigen

@@ -0,0 +1,711 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/sync_file.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/* Number of entries in the ioctl dispatch table (defined later in this file) */
#define HW_SYNC_IOCTL_COUNT ARRAY_SIZE(hw_sync_debugfs_ioctls)
/* Max number of fences handled by a single fence-array ioctl */
#define HW_FENCE_ARRAY_SIZE 10

/* ioctl magic for the hw-sync debugfs interface; command numbers start at 10 */
#define HW_SYNC_IOC_MAGIC 'W'

#define HW_SYNC_IOC_REG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 10, unsigned long)
#define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long)
#define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\
	struct hw_fence_sync_create_data)
#define HW_SYNC_IOC_DESTROY_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 13,\
	struct hw_fence_sync_create_data)
#define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\
	struct hw_fence_array_sync_create_data)
#define HW_SYNC_IOC_DESTROY_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 15,\
	struct hw_fence_array_sync_create_data)
#define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int)
#define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long)
#define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int)
#define HW_SYNC_IOC_RESET_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 19, unsigned long)

/*
 * Maps an ioctl command number to an index in the dispatch table.
 * NOTE(review): the '- 2' offset must stay in sync with the command
 * numbering above — confirm it matches the table layout.
 */
#define HW_FENCE_IOCTL_NR(n)			(_IOC_NR(n) - 2)

/* Builds one dispatch-table entry at index HW_FENCE_IOCTL_NR(ioctl) */
#define HW_IOCTL_DEF(ioctl, _func)	\
	[HW_FENCE_IOCTL_NR(ioctl)] = {	\
		.cmd = ioctl,		\
		.func = _func,		\
		.name = #ioctl		\
	}
/**
 * struct hw_sync_obj - per client hw sync object.
 * @context: context id used to create fences.
 * @client_id: to uniquely represent client.
 * @client_handle: Pointer to the structure holding the resources
 *			allocated to the client.
 * @mem_descriptor: Memory descriptor of the queue allocated by the
 *			hardware fence driver for each client during register.
 */
struct hw_sync_obj {
	u64 context;
	int client_id;
	void *client_handle;
	struct msm_hw_fence_mem_addr mem_descriptor;
};

/**
 * struct hw_fence_sync_create_data - data used in creating fences.
 * @seqno: sequence number.
 * @incr_context: if set, then the context would be incremented.
 * @fence: returns the fd of the new sync_file with the created fence.
 * @hash: fence hash
 */
struct hw_fence_sync_create_data {
	u64 seqno;
	bool incr_context;
	__s32 fence;
	u64 hash;
};

/**
 * struct hw_fence_array_sync_create_data - data used in creating multiple fences.
 * @seqno: array of sequence numbers used to create fences.
 * @num_fences: number of fences to be created (at most HW_FENCE_ARRAY_SIZE).
 * @fence: return the fd of the new sync_file with the created fence.
 * @hash: array of fence hash
 */
struct hw_fence_array_sync_create_data {
	u64 seqno[HW_FENCE_ARRAY_SIZE];
	int num_fences;
	__s32 fence;
	u64 hash[HW_FENCE_ARRAY_SIZE];
};

/**
 * struct hw_fence_sync_signal_data - data used to signal fences.
 * @hash: hash of the fence.
 * @error_flag: error flag
 */
struct hw_fence_sync_signal_data {
	u64 hash;
	u32 error_flag;
};

/**
 * struct hw_fence_sync_wait_data - data used to wait on fences.
 * @fence: fence fd.
 * @timeout_ms: fence wait time out.
 */
struct hw_fence_sync_wait_data {
	__s32 fence;
	u64 timeout_ms;
};

/**
 * struct hw_fence_sync_reset_data - data used to reset client.
 * @client_id: client id.
 * @reset_flag: reset flag
 */
struct hw_fence_sync_reset_data {
	int client_id;
	u32 reset_flag;
};

/* handler signature shared by all hw-sync ioctls */
typedef long hw_fence_ioctl_t(struct hw_sync_obj *obj, unsigned long arg);

/**
 * struct hw_sync_ioctl_def - hw_sync driver ioctl entry
 * @cmd: ioctl command number, without flags
 * @func: handler for this ioctl
 * @name: user-readable name for debug output
 */
struct hw_sync_ioctl_def {
	unsigned int cmd;
	hw_fence_ioctl_t *func;
	const char *name;
};
/* Validate that @obj exists and its client_id lies in the validation range. */
static bool _is_valid_client(struct hw_sync_obj *obj)
{
	bool in_range;

	if (!obj)
		return false;

	in_range = (obj->client_id >= HW_FENCE_CLIENT_ID_VAL0 &&
		obj->client_id < HW_FENCE_CLIENT_MAX);
	if (!in_range)
		HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id,
			HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX);

	return in_range;
}
/*
 * Read a client id from user memory at @arg and range-check it.
 *
 * Returns the client id on success or a negative errno on failure.
 * Note: errors are plain negative ints, NOT ERR_PTRs — callers must
 * test "ret < 0".
 */
static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg)
{
	int client_id;

	/* validate kernel-side state before touching user memory */
	if (!obj)
		return -EINVAL;

	if (copy_from_user(&client_id, (void __user *)arg, sizeof(client_id)))
		return -EFAULT;

	if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id >= HW_FENCE_CLIENT_MAX) {
		HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id,
			HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX);
		return -EINVAL;
	}

	return client_id;
}
/* Take a reference on the dma-fence behind @fd; returns NULL for a bad fd. */
static void *_hw_sync_get_fence(int fd)
{
	if (fd < 0)
		return NULL;

	return sync_file_get_fence(fd);
}
/* debugfs open: allocate a per-fd hw_sync_obj with a fresh fence context. */
static int hw_sync_debugfs_open(struct inode *inode, struct file *file)
{
	struct hw_sync_obj *sync_obj = kzalloc(sizeof(*sync_obj), GFP_KERNEL);

	if (!sync_obj)
		return -ENOMEM;

	sync_obj->context = dma_fence_context_alloc(1);
	file->private_data = sync_obj;

	return 0;
}
/* debugfs release: free the per-fd hw_sync_obj allocated in open. */
static int hw_sync_debugfs_release(struct inode *inode, struct file *file)
{
	struct hw_sync_obj *sync_obj = file->private_data;

	if (!sync_obj)
		return -EINVAL;

	kfree(sync_obj);

	return 0;
}
/*
 * HW_SYNC_IOC_REG_CLIENT handler: register this fd's hw_sync_obj as the
 * given validation client with the hw-fence driver.
 */
static long hw_sync_ioctl_reg_client(struct hw_sync_obj *obj, unsigned long arg)
{
	int client_id = _get_client_id(obj, arg);

	/*
	 * _get_client_id() returns a plain negative errno on failure. The
	 * original IS_ERR(&client_id) tested the address of a stack variable,
	 * which is never an ERR_PTR, so errors were silently ignored.
	 */
	if (client_id < 0) {
		return client_id;
	} else if (obj->client_handle) {
		HWFNC_ERR("client:%d already registered as validation client\n", client_id);
		return -EINVAL;
	}

	obj->client_id = client_id;
	obj->client_handle = msm_hw_fence_register(obj->client_id, &obj->mem_descriptor);
	if (IS_ERR_OR_NULL(obj->client_handle))
		return -EINVAL;

	return 0;
}
static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long arg)
{
int client_id = _get_client_id(obj, arg);
if (IS_ERR(&client_id))
return client_id;
return msm_hw_fence_deregister(obj->client_handle);
}
/*
 * HW_SYNC_IOC_CREATE_FENCE handler: create one dma-fence, back it with a
 * hw-fence table entry, wrap it in a sync_file, and return the new fd and
 * the hw-fence hash to user space through the same ioctl struct.
 */
static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long arg)
{
	struct msm_hw_fence_create_params params;
	struct hw_fence_sync_create_data data;
	struct hw_dma_fence *fence;
	spinlock_t *fence_lock;
	u64 hash;
	struct sync_file *sync_file;
	int fd, ret;

	if (!_is_valid_client(obj)) {
		return -EINVAL;
	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
		HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id);
		return -EINVAL;
	}

	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
		return -EFAULT;

	/* create dma fence */
	/* the lock is allocated separately: it must outlive this stack frame */
	fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
	if (!fence_lock)
		return -ENOMEM;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence) {
		kfree(fence_lock);
		return -ENOMEM;
	}

	snprintf(fence->name, HW_FENCE_NAME_SIZE, "hwfence:id:%d:ctx=%lu:seqno:%lu",
		obj->client_id, obj->context, data.seqno);

	spin_lock_init(fence_lock);
	dma_fence_init(&fence->base, &hw_fence_dbg_ops, fence_lock, obj->context, data.seqno);

	HWFNC_DBG_H("creating hw_fence for client:%d ctx:%llu seqno:%llu\n", obj->client_id,
		obj->context, data.seqno);
	params.fence = &fence->base;
	params.handle = &hash;

	/* create hw fence */
	ret = msm_hw_fence_create(obj->client_handle, &params);
	if (ret) {
		HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n",
			obj->client_id, obj->context, data.seqno);
		/* drops the initial ref; release callback frees fence + lock */
		dma_fence_put(&fence->base);
		return -EINVAL;
	}

	/* keep handle in dma_fence, to destroy hw-fence during release */
	fence->client_handle = obj->client_handle;

	/* optionally start a fresh context for subsequent creates on this fd */
	if (data.incr_context)
		obj->context = dma_fence_context_alloc(1);

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
		dma_fence_put(&fence->base);
		return fd;
	}

	sync_file = sync_file_create(&fence->base);
	if (sync_file == NULL) {
		HWFNC_ERR("couldn't create fence fd, %d\n", fd);
		dma_fence_put(&fence->base);
		ret = -EINVAL;
		goto exit;
	}

	/* Decrement the refcount that sync_file_create increments */
	dma_fence_put(&fence->base);

	data.fence = fd;
	data.hash = hash;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		/* drop the fence ref and the sync_file (fput releases its fence ref) */
		dma_fence_put(&fence->base);
		fput(sync_file->file);
		ret = -EFAULT;
		goto exit;
	}

	/* hand the sync_file to user space via the reserved fd */
	fd_install(fd, sync_file->file);

	return 0;
exit:
	put_unused_fd(fd);
	return ret;
}
/*
 * HW_SYNC_IOC_DESTROY_FENCE handler: drop references on the dma-fence
 * behind @data.fence so its release callback destroys the backing hw-fence.
 */
static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long arg)
{
	int fd;
	struct hw_dma_fence *fence;
	struct hw_fence_sync_create_data data;

	if (!_is_valid_client(obj))
		return -EINVAL;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
		return -EFAULT;

	fd = data.fence;
	fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd);
	if (!fence) {
		HWFNC_ERR("fence for fd:%d not found\n", fd);
		return -EINVAL;
	}

	/* Decrement the refcount that hw_sync_get_fence increments */
	dma_fence_put(&fence->base);

	/*
	 * To destroy fence
	 * NOTE(review): this assumes the caller holds the last remaining
	 * reference (e.g. the fd's sync_file was already released) — confirm;
	 * otherwise this put only drops one ref and the fence survives.
	 */
	dma_fence_put(&fence->base);

	return 0;
}
/*
 * HW_SYNC_IOC_CREATE_FENCE_ARRAY handler: create @data.num_fences dma-fences,
 * bundle them into a dma_fence_array, back each fence with a hw-fence table
 * entry, and return a sync_file fd plus the per-fence hashes to user space.
 */
static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg)
{
	struct dma_fence_array *fence_array;
	struct hw_fence_array_sync_create_data data;
	struct dma_fence **fences = NULL;
	struct msm_hw_fence_create_params params;
	struct sync_file *sync_file;
	spinlock_t **fence_lock = NULL;
	int num_fences, i, fd, ret;
	u64 hash;

	if (!_is_valid_client(obj)) {
		return -EINVAL;
	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
		HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id);
		return -EINVAL;
	}

	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
		return -EFAULT;

	num_fences = data.num_fences;
	/*
	 * Strictly less than HW_FENCE_ARRAY_SIZE: data.seqno[num_fences] is
	 * used below as the fence-array's own seqno, so one extra slot of
	 * the fixed-size array must remain free.
	 */
	if (num_fences >= HW_FENCE_ARRAY_SIZE) {
		HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n",
			num_fences, HW_FENCE_ARRAY_SIZE);
		return -EINVAL;
	}

	fence_lock = kcalloc(num_fences, sizeof(*fence_lock), GFP_KERNEL);
	if (!fence_lock)
		return -ENOMEM;

	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
	if (!fences) {
		kfree(fence_lock);
		return -ENOMEM;
	}

	/*
	 * Create the array of dma fences
	 * This API takes seqno[num_fences] as the seqno for the fence-array
	 * and from 0 to (num_fences - 1) for the fences in the array.
	 */
	for (i = 0; i < num_fences; i++) {
		struct hw_dma_fence *dma_fence;

		fence_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
		if (!fence_lock[i]) {
			_cleanup_fences(i, fences, fence_lock);
			return -ENOMEM;
		}
		dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL);
		if (!dma_fence) {
			_cleanup_fences(i, fences, fence_lock);
			return -ENOMEM;
		}
		fences[i] = &dma_fence->base;

		spin_lock_init(fence_lock[i]);
		dma_fence_init(fences[i], &hw_fence_dbg_ops, fence_lock[i],
			obj->context, data.seqno[i]);
	}

	/* create the fence array from array of dma fences */
	/* note: the loop above left i == num_fences, selecting the extra seqno slot */
	fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno[i], 0);
	if (!fence_array) {
		HWFNC_ERR("Error creating fence_array\n");
		_cleanup_fences(num_fences - 1, fences, fence_lock);
		return -EINVAL;
	}

	/* create hw fences */
	for (i = 0; i < num_fences; i++) {
		params.fence = fences[i];
		params.handle = &hash;

		ret = msm_hw_fence_create(obj->client_handle, &params);
		if (ret) {
			HWFNC_ERR("Error creating HW fence\n");
			dma_fence_put(&fence_array->base);
			/*
			 * free array of pointers, no need to call kfree in 'fences',
			 * since that is released from the fence-array release api
			 */
			kfree(fence_lock);
			kfree(fence_array);
			return -EINVAL;
		}

		/* keep handle in dma_fence, to destroy hw-fence during release */
		to_hw_dma_fence(fences[i])->client_handle = obj->client_handle;
		data.hash[i] = hash;
	}

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
		dma_fence_put(&fence_array->base);
		kfree(fence_lock);
		kfree(fence_array);
		return fd;
	}

	sync_file = sync_file_create(&fence_array->base);
	if (sync_file == NULL) {
		HWFNC_ERR("couldn't create fence fd, %d\n", fd);
		dma_fence_put(&fence_array->base);
		kfree(fence_lock);
		kfree(fence_array);
		ret = -EINVAL;
		goto exit;
	}

	/* Decrement the refcount that sync_file_create increments */
	dma_fence_put(&fence_array->base);

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		fput(sync_file->file);
		dma_fence_put(&fence_array->base);
		kfree(fence_lock);
		kfree(fence_array);
		ret = -EFAULT;
		goto exit;
	}

	/* hand the sync_file to user space via the reserved fd */
	fd_install(fd, sync_file->file);

	return 0;
exit:
	put_unused_fd(fd);
	return ret;
}
/*
 * HW_SYNC_IOC_DESTROY_FENCE_ARRAY handler: drop references on the
 * dma_fence_array behind @data.fence so the backing hw-fences get destroyed
 * through the fences' release callbacks.
 */
static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned long arg)
{
	struct dma_fence_array *fence_array;
	struct dma_fence *fence;
	struct hw_fence_array_sync_create_data data;
	int fd;

	if (!_is_valid_client(obj))
		return -EINVAL;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
		return -EFAULT;

	fd = data.fence;
	fence = (struct dma_fence *)_hw_sync_get_fence(fd);
	if (!fence) {
		HWFNC_ERR("Invalid fence fd: %d\n", fd);
		return -EINVAL;
	}

	/* Decrement the refcount that hw_sync_get_fence increments */
	dma_fence_put(fence);

	/* returns NULL when the fence is not actually a dma_fence_array */
	fence_array = to_dma_fence_array(fence);
	if (!fence_array) {
		HWFNC_ERR("Invalid fence array fd: %d\n", fd);
		return -EINVAL;
	}

	/*
	 * Destroy fence array
	 * NOTE(review): like destroy_fence, this assumes the caller holds
	 * the final reference at this point — confirm.
	 */
	dma_fence_put(&fence_array->base);

	return 0;
}
/*
 * this IOCTL only supports receiving one fence as input-parameter, which can be
 * either a "dma_fence" or a "dma_fence_array", but eventually we would expand
 * this API to receive more fences
 */
static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long arg)
{
	struct dma_fence *fence;
	int ret, fd, num_fences = 1;

	if (!_is_valid_client(obj))
		return -EINVAL;

	/* arg points directly at the fence fd */
	if (copy_from_user(&fd, (void __user *)arg, sizeof(fd)))
		return -EFAULT;

	fence = (struct dma_fence *)_hw_sync_get_fence(fd);
	if (!fence) {
		HWFNC_ERR("Invalid fence fd: %d\n", fd);
		return -EINVAL;
	}

	/* register this client as a waiter on the fence (create flag set) */
	ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1);

	/* Decrement the refcount that hw_sync_get_fence increments */
	dma_fence_put(fence);

	return ret;
}
/*
 * HW_SYNC_IOC_FENCE_SIGNAL handler: push the fence identified by @data.hash
 * into the client's tx queue, then ring the ipcc doorbell so the fence
 * controller processes the signal.
 */
static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg)
{
	struct hw_fence_sync_signal_data data;
	int ret, tx_client, rx_client, signal_id;

	if (!_is_valid_client(obj)) {
		return -EINVAL;
	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
		HWFNC_ERR("invalid client handle for the client_id: %d\n", obj->client_id);
		return -EINVAL;
	}

	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
		return -EFAULT;

	/* write the signal (flags=0, caller-supplied error) to the tx queue */
	ret = msm_hw_fence_update_txq(obj->client_handle, data.hash, 0, data.error_flag);
	if (ret) {
		HWFNC_ERR("hw fence update txq has failed client_id: %d\n", obj->client_id);
		return ret;
	}

	/* map this validation client to its ipc signal id */
	signal_id = dbg_out_clients_signal_map_no_dpu[obj->client_id].ipc_signal_id;
	if (signal_id < 0)
		return -EINVAL;

	/* validation traffic loops APPS -> APPS */
	tx_client = HW_FENCE_IPC_CLIENT_ID_APPS;
	rx_client = HW_FENCE_IPC_CLIENT_ID_APPS;
	ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id);
	if (ret) {
		HWFNC_ERR("hw fence trigger signal has failed\n");
		return ret;
	}

	return 0;
}
static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg)
{
struct msm_hw_fence_client *hw_fence_client;
struct msm_hw_fence_queue_payload payload;
struct hw_fence_sync_wait_data data;
struct dma_fence *fence;
int fd, ret, read = 1, queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */
if (!_is_valid_client(obj))
return -EINVAL;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
fd = data.fence;
fence = (struct dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
HWFNC_ERR("Invalid fence fd: %d\n", fd);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle;
if (!hw_fence_client) {
HWFNC_ERR("invalid client handle for fd:%d\n", fd);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return -EINVAL;
}
ret = wait_event_timeout(hw_fence_client->wait_queue,
atomic_read(&hw_fence_client->val_signal) > 0,
msecs_to_jiffies(data.timeout_ms));
if (!ret) {
HWFNC_ERR("timed out waiting for the client signal %d\n", data.timeout_ms);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return -ETIMEDOUT;
}
/* clear doorbell signal flag */
atomic_set(&hw_fence_client->val_signal, 0);
while (read) {
read = hw_fence_read_queue(obj->client_handle, &payload, queue_type);
if (read < 0) {
HWFNC_ERR("unable to read client rxq client_id:%d\n", obj->client_id);
break;
}
HWFNC_DBG_L("rxq read: hash:%llu, flags:%llu, error:%lu\n",
payload.hash, payload.flags, payload.error);
if (payload.ctxt_id == fence->context && payload.seqno == fence->seqno) {
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return 0;
}
}
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
HWFNC_ERR("fence received did not match the fence expected\n");
HWFNC_ERR("fence received: context:%d seqno:%d fence expected: context:%d seqno:%d\n",
payload.ctxt_id, payload.seqno, fence->context, fence->seqno);
return read;
}
/*
 * HW_SYNC_IOC_RESET_CLIENT handler: reset all hw-fence state for this fd's
 * validation client with the user-supplied reset flags.
 */
static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg)
{
	int ret;
	struct hw_fence_sync_reset_data data;

	if (!_is_valid_client(obj)) {
		return -EINVAL;
	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
		/*
		 * Log obj->client_id here: "data" has not been copied from
		 * user space yet, so the original's data.client_id was an
		 * uninitialized stack read.
		 */
		HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id);
		return -EINVAL;
	}

	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
		return -EFAULT;

	ret = msm_hw_fence_reset_client(obj->client_handle, data.reset_flag);
	if (ret) {
		HWFNC_ERR("hw fence reset client has failed\n");
		return ret;
	}

	return 0;
}
/*
 * Dispatch table for the debugfs validation ioctls. Designated initializers
 * via HW_IOCTL_DEF() place entries at HW_FENCE_IOCTL_NR() slots 8..17;
 * lower slots stay zeroed and are rejected by the NULL-func check.
 */
static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = {
	HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client),
	HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client),
	HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence),
	HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE, hw_sync_ioctl_destroy_fence),
	HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array),
	HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE_ARRAY, hw_sync_ioctl_destroy_fence_array),
	HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait),
	HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal),
	HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait),
	HW_IOCTL_DEF(HW_SYNC_IOC_RESET_CLIENT, hw_sync_ioctl_reset_client)
};
/*
 * unlocked_ioctl entry point: map @cmd to a table slot and dispatch to the
 * matching handler with this fd's hw_sync_obj.
 */
static long hw_sync_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hw_sync_obj *obj = file->private_data;
	int num = HW_FENCE_IOCTL_NR(cmd);
	hw_fence_ioctl_t *func;

	/*
	 * Reject out-of-range commands explicitly. HW_FENCE_IOCTL_NR() goes
	 * negative for a foreign cmd with _IOC_NR < 2; the original relied on
	 * the implicit int-to-size_t conversion in the comparison against
	 * ARRAY_SIZE to catch that, which is fragile.
	 */
	if (num < 0 || num >= HW_SYNC_IOCTL_COUNT) {
		HWFNC_ERR("invalid ioctl num = %d\n", num);
		return -EINVAL;
	}

	func = hw_sync_debugfs_ioctls[num].func;
	if (unlikely(!func)) {
		HWFNC_ERR("no function num = %d\n", num);
		return -ENOTTY;
	}

	return func(obj, arg);
}
/* file_operations for the hw-sync validation debugfs node */
const struct file_operations hw_sync_debugfs_fops = {
	.open = hw_sync_debugfs_open,
	.release = hw_sync_debugfs_release,
	.unlocked_ioctl = hw_sync_debugfs_ioctl,
};

519
hw_fence/src/msm_hw_fence.c Normale Datei
Datei anzeigen

@@ -0,0 +1,519 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_debug.h"
#include "hw_fence_drv_ipc.h"
/* Global driver state; poisoned with ERR_PTR(-EPROBE_DEFER) on probe failure/remove */
struct hw_fence_driver_data *hw_fence_drv_data;
/* Set via the "enable" module parameter below; driver is opt-in */
static bool hw_fence_driver_enable;
/*
 * msm_hw_fence_register - register @client_id with the hw-fence driver.
 * @client_id: id of the client to register (must be < HW_FENCE_CLIENT_MAX).
 * @mem_descriptor: out-param, filled with the queue memory allocated for
 *                  this client.
 *
 * Returns an opaque client handle on success, or an ERR_PTR on failure
 * (-EAGAIN when the driver is not ready yet, -EINVAL for bad params or a
 * duplicate registration, -ENOMEM on allocation failure).
 */
void *msm_hw_fence_register(enum hw_fence_client_id client_id,
	struct msm_hw_fence_mem_addr *mem_descriptor)
{
	struct msm_hw_fence_client *hw_fence_client;
	int ret;

	HWFNC_DBG_H("++ client_id:%d\n", client_id);

	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
		HWFNC_ERR("hw fence driver not ready\n");
		return ERR_PTR(-EAGAIN);
	}

	if (!mem_descriptor || client_id >= HW_FENCE_CLIENT_MAX) {
		HWFNC_ERR("Invalid params: %d client_id:%d\n",
			!mem_descriptor, client_id);
		return ERR_PTR(-EINVAL);
	}

	/* Avoid race condition if multiple-threads request same client at same time */
	mutex_lock(&hw_fence_drv_data->clients_mask_lock);
	if (hw_fence_drv_data->client_id_mask & BIT(client_id)) {
		HWFNC_ERR("client with id %d already registered\n", client_id);
		mutex_unlock(&hw_fence_drv_data->clients_mask_lock);
		return ERR_PTR(-EINVAL);
	}

	/* Mark client as registered */
	hw_fence_drv_data->client_id_mask |= BIT(client_id);
	mutex_unlock(&hw_fence_drv_data->clients_mask_lock);

	/* Alloc client handle */
	hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL);
	if (!hw_fence_client) {
		/* undo the registration mark taken above */
		mutex_lock(&hw_fence_drv_data->clients_mask_lock);
		hw_fence_drv_data->client_id_mask &= ~BIT(client_id);
		mutex_unlock(&hw_fence_drv_data->clients_mask_lock);
		return ERR_PTR(-ENOMEM);
	}

	hw_fence_client->client_id = client_id;
	hw_fence_client->ipc_client_id = hw_fence_ipcc_get_client_id(hw_fence_drv_data, client_id);
	if (hw_fence_client->ipc_client_id <= 0) {
		HWFNC_ERR("Failed to find client:%d ipc id\n", client_id);
		ret = -EINVAL;
		goto error;
	}

	hw_fence_client->ipc_signal_id = hw_fence_ipcc_get_signal_id(hw_fence_drv_data, client_id);
	if (hw_fence_client->ipc_signal_id < 0) {
		HWFNC_ERR("Failed to find client:%d signal\n", client_id);
		ret = -EINVAL;
		goto error;
	}

	hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id);

	/* make the handle visible in the global client table */
	hw_fence_drv_data->clients[client_id] = hw_fence_client;

	/* Alloc Client HFI Headers and Queues */
	ret = hw_fence_alloc_client_resources(hw_fence_drv_data,
		hw_fence_client, mem_descriptor);
	if (ret)
		goto error;

	/* Initialize signal for communication with the FenceCTL */
	ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client);
	if (ret)
		goto error;

	/*
	 * Update Fence Controller with the address of the Queues and
	 * the Fences Tables for this client
	 */
	ret = hw_fence_init_controller_resources(hw_fence_client);
	if (ret)
		goto error;

	HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc_client_id:%d\n",
		hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id,
		hw_fence_client->ipc_client_id);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	/* used by the debugfs validation fence-wait ioctl */
	init_waitqueue_head(&hw_fence_client->wait_queue);
#endif /* CONFIG_DEBUG_FS */

	return (void *)hw_fence_client;
error:
	/* Free all the allocated resources */
	hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client);

	HWFNC_ERR("failed with error:%d\n", ret);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(msm_hw_fence_register);
/*
 * msm_hw_fence_deregister - release all resources behind @client_handle.
 * Returns 0 on success, -EINVAL for a NULL/ERR handle.
 */
int msm_hw_fence_deregister(void *client_handle)
{
	struct msm_hw_fence_client *hw_fence_client = client_handle;

	if (IS_ERR_OR_NULL(hw_fence_client)) {
		HWFNC_ERR("Invalid client handle\n");
		return -EINVAL;
	}

	HWFNC_DBG_H("+\n");
	hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client);
	HWFNC_DBG_H("-\n");

	return 0;
}
EXPORT_SYMBOL(msm_hw_fence_deregister);
/*
 * msm_hw_fence_create - back the dma-fence in @params with a hw-fence
 * table entry and set MSM_HW_FENCE_FLAG_ENABLED_BIT on it.
 * @client_handle: handle returned by msm_hw_fence_register().
 * @params: in/out; params->fence is the dma-fence, params->handle receives
 *          the table hash.
 *
 * Returns 0 on success, -EINVAL on bad input, -EAGAIN when the driver or
 * VM is not ready.
 */
int msm_hw_fence_create(void *client_handle,
	struct msm_hw_fence_create_params *params)
{
	struct msm_hw_fence_client *hw_fence_client;
	struct dma_fence_array *array;
	struct dma_fence *fence;
	int ret;

	if (IS_ERR_OR_NULL(client_handle) || !params || !params->fence || !params->handle) {
		HWFNC_ERR("Invalid input\n");
		return -EINVAL;
	}

	/*
	 * hw_fence_drv_data is NULL before probe and ERR_PTR(-EPROBE_DEFER)
	 * after a failed probe or remove; the original dereferenced
	 * ->vm_ready without this guard.
	 */
	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
		HWFNC_ERR("hw fence driver not ready\n");
		return -EAGAIN;
	}

	if (!hw_fence_drv_data->vm_ready) {
		HWFNC_DBG_H("VM not ready, cannot create fence\n");
		return -EAGAIN;
	}

	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
	fence = (struct dma_fence *)params->fence;

	HWFNC_DBG_H("+\n");

	/* Block any Fence-Array, we should only get individual fences */
	array = to_dma_fence_array(fence);
	if (array) {
		HWFNC_ERR("HW Fence must be created for individual fences\n");
		return -EINVAL;
	}

	/* This Fence is already a HW-Fence */
	if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
		HWFNC_ERR("DMA Fence already has HW Fence Flag set\n");
		return -EINVAL;
	}

	/* Create the HW Fence, i.e. add entry in the Global Table for this Fence */
	ret = hw_fence_create(hw_fence_drv_data, hw_fence_client,
		fence->context, fence->seqno, params->handle);
	if (ret) {
		HWFNC_ERR("Error creating HW fence\n");
		return ret;
	}

	/* If no error, set the HW Fence Flag in the dma-fence */
	set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);

	HWFNC_DBG_H("-\n");
	return 0;
}
EXPORT_SYMBOL(msm_hw_fence_create);
int msm_hw_fence_destroy(void *client_handle,
struct dma_fence *fence)
{
struct msm_hw_fence_client *hw_fence_client;
struct dma_fence_array *array;
int ret;
if (IS_ERR_OR_NULL(client_handle) || !fence) {
HWFNC_ERR("Invalid data\n");
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
HWFNC_DBG_H("+\n");
/* Block any Fence-Array, we should only get individual fences */
array = to_dma_fence_array(fence);
if (array) {
HWFNC_ERR("HW Fence must be destroy for individual fences\n");
return -EINVAL;
}
/* This Fence not a HW-Fence */
if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%llx\n", fence->flags);
return -EINVAL;
}
/* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */
ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client,
fence->context, fence->seqno);
if (ret) {
HWFNC_ERR("Error destroying the HW fence\n");
return ret;
}
/* Clear the HW Fence Flag in the dma-fence */
clear_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL(msm_hw_fence_destroy);
/*
 * msm_hw_fence_wait_update - register/unregister this client as a waiter
 * on each fence in @fence_list (fence-arrays are expanded internally).
 * @create: when set, waiter registration also creates missing hw-fences.
 *
 * Returns 0 on success, -EINVAL on bad input, -EAGAIN when the driver or
 * VM is not ready, or the first processing error.
 */
int msm_hw_fence_wait_update(void *client_handle,
	struct dma_fence **fence_list, u32 num_fences, bool create)
{
	struct msm_hw_fence_client *hw_fence_client;
	struct dma_fence_array *array;
	int i, ret = 0;

	if (IS_ERR_OR_NULL(client_handle) || !fence_list || !*fence_list) {
		HWFNC_ERR("Invalid data\n");
		return -EINVAL;
	}

	/*
	 * The original dereferenced hw_fence_drv_data->vm_ready without
	 * checking the global for NULL/ERR_PTR first.
	 */
	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
		HWFNC_ERR("hw fence driver not ready\n");
		return -EAGAIN;
	}

	if (!hw_fence_drv_data->vm_ready) {
		/* original message said "cannot destroy fence" — copy-paste error */
		HWFNC_DBG_H("VM not ready, cannot update fence\n");
		return -EAGAIN;
	}

	hw_fence_client = (struct msm_hw_fence_client *)client_handle;

	HWFNC_DBG_H("+\n");

	/* Process all the list of fences */
	for (i = 0; i < num_fences; i++) {
		struct dma_fence *fence = fence_list[i];

		/* Process a Fence-Array */
		array = to_dma_fence_array(fence);
		if (array) {
			ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client,
				array);
			if (ret) {
				HWFNC_ERR("Failed to create FenceArray\n");
				return ret;
			}
		} else {
			/* Process individual Fence */
			ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence);
			if (ret) {
				HWFNC_ERR("Failed to create Fence\n");
				return ret;
			}
		}
	}

	HWFNC_DBG_H("-\n");
	return 0;
}
EXPORT_SYMBOL(msm_hw_fence_wait_update);
/*
 * msm_hw_fence_reset_client - walk the whole hw-fence table and clean up
 * every fence owned by this client, honoring @reset_flags.
 *
 * Returns 0 on success, -EINVAL for a bad handle, -EAGAIN when the driver
 * or VM is not ready.
 */
int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags)
{
	struct msm_hw_fence_client *hw_fence_client;
	struct msm_hw_fence *hw_fences_tbl;
	int i;

	if (IS_ERR_OR_NULL(client_handle)) {
		HWFNC_ERR("Invalid client handle!\n");
		return -EINVAL;
	}

	/*
	 * The original dereferenced hw_fence_drv_data->vm_ready without
	 * first checking the global for NULL/ERR_PTR.
	 */
	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
		HWFNC_ERR("hw fence driver not ready\n");
		return -EAGAIN;
	}

	if (!hw_fence_drv_data->vm_ready) {
		HWFNC_DBG_H("VM not ready, cannot reset client\n");
		return -EAGAIN;
	}

	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
	hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl;

	HWFNC_DBG_L("reset fences for client:%d\n", hw_fence_client->client_id);
	for (i = 0; i < hw_fence_drv_data->hw_fences_tbl_cnt; i++)
		hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client,
			&hw_fences_tbl[i], i, reset_flags);

	return 0;
}
EXPORT_SYMBOL(msm_hw_fence_reset_client);
/*
 * msm_hw_fence_update_txq - write the fence at table index @handle into
 * this client's tx queue with the given @flags and @error.
 *
 * Returns 0 on success, -EAGAIN when the driver/VM is not ready, -EINVAL
 * for a bad handle or out-of-range table index.
 */
int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error)
{
	struct msm_hw_fence_client *hw_fence_client;

	if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready ||
			!hw_fence_drv_data->vm_ready) {
		HWFNC_ERR("hw fence driver or vm not ready\n");
		return -EAGAIN;
	} else if (IS_ERR_OR_NULL(client_handle) ||
			(handle >= hw_fence_drv_data->hw_fences_tbl_cnt)) {
		/* handle is u64: print with %llu (the original used %d) */
		HWFNC_ERR("Invalid handle:%llu or client handle:%d max:%d\n", handle,
			IS_ERR_OR_NULL(client_handle), hw_fence_drv_data->hw_fences_tbl_cnt);
		return -EINVAL;
	}

	hw_fence_client = (struct msm_hw_fence_client *)client_handle;

	/* Write to Tx queue */
	hw_fence_update_queue(hw_fence_drv_data, hw_fence_client,
		hw_fence_drv_data->hw_fences_tbl[handle].ctx_id,
		hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle,
		flags, error, HW_FENCE_TX_QUEUE - 1);

	return 0;
}
EXPORT_SYMBOL(msm_hw_fence_update_txq);
/*
 * msm_hw_fence_trigger_signal - ring ipcc signal @signal_id from
 * @tx_client_id to @rx_client_id on behalf of @client_handle.
 * Returns 0, -EAGAIN when the driver/VM is not ready, -EINVAL for a bad handle.
 */
int msm_hw_fence_trigger_signal(void *client_handle,
	u32 tx_client_id, u32 rx_client_id,
	u32 signal_id)
{
	struct msm_hw_fence_client *hw_fence_client;
	bool driver_ready;

	driver_ready = !IS_ERR_OR_NULL(hw_fence_drv_data) &&
		hw_fence_drv_data->resources_ready &&
		hw_fence_drv_data->vm_ready;
	if (!driver_ready) {
		HWFNC_ERR("hw fence driver or vm not ready\n");
		return -EAGAIN;
	}

	if (IS_ERR_OR_NULL(client_handle)) {
		HWFNC_ERR("Invalid client\n");
		return -EINVAL;
	}

	hw_fence_client = client_handle;
	HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id);
	hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_id,
		rx_client_id, signal_id);

	return 0;
}
EXPORT_SYMBOL(msm_hw_fence_trigger_signal);
/* Function used for simulation purposes only. */
int msm_hw_fence_driver_doorbell_sim(u64 db_mask)
{
	struct hw_fence_driver_data *drv_data = hw_fence_drv_data;

	if (IS_ERR_OR_NULL(drv_data) || !drv_data->resources_ready) {
		HWFNC_ERR("hw fence driver not ready\n");
		return -EAGAIN;
	}

	HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n",
		db_mask, hw_fence_get_qtime(drv_data));

	/* fan the mask out to the per-client doorbell handlers */
	hw_fence_utils_process_doorbell_mask(drv_data, db_mask);

	return 0;
}
EXPORT_SYMBOL(msm_hw_fence_driver_doorbell_sim);
/*
 * Allocate and initialize the driver's global state for @pdev.
 * On failure the global pointer is poisoned with -EPROBE_DEFER so the
 * exported API entry points report "not ready".
 */
static int msm_hw_fence_probe_init(struct platform_device *pdev)
{
	int rc;

	HWFNC_DBG_H("+\n");

	hw_fence_drv_data = kzalloc(sizeof(*hw_fence_drv_data), GFP_KERNEL);
	if (!hw_fence_drv_data)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, hw_fence_drv_data);
	hw_fence_drv_data->dev = &pdev->dev;

	/* Initialize HW Fence Driver resources */
	rc = hw_fence_init(hw_fence_drv_data);
	if (rc)
		goto error;

	mutex_init(&hw_fence_drv_data->clients_mask_lock);

	/* set ready value so clients can register */
	hw_fence_drv_data->resources_ready = true;

	HWFNC_DBG_H("-\n");
	return rc;
error:
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(hw_fence_drv_data);
	hw_fence_drv_data = (void *) -EPROBE_DEFER;

	HWFNC_ERR("error %d\n", rc);

	return rc;
}
/*
 * Platform-driver probe: bail out unless the module was enabled via the
 * "enable" parameter, then initialize for a matching device node.
 */
static int msm_hw_fence_probe(struct platform_device *pdev)
{
	int rc;

	HWFNC_DBG_H("+\n");

	if (!pdev) {
		HWFNC_ERR("null platform dev\n");
		return -EINVAL;
	}

	if (!hw_fence_driver_enable) {
		HWFNC_DBG_INFO("hw fence driver not enabled\n");
		return -EOPNOTSUPP;
	}

	rc = of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence") ?
		msm_hw_fence_probe_init(pdev) : -EINVAL;
	if (rc) {
		HWFNC_ERR("error %d\n", rc);
		return rc;
	}

	HWFNC_DBG_H("-\n");
	return 0;
}
/*
 * Platform-driver remove: free the global state and poison the pointer
 * with -EPROBE_DEFER so later API calls see "not ready".
 */
static int msm_hw_fence_remove(struct platform_device *pdev)
{
	HWFNC_DBG_H("+\n");

	if (!pdev) {
		HWFNC_ERR("null platform dev\n");
		return -EINVAL;
	}

	/*
	 * Note: this assignment happens before the NULL check, so when
	 * drvdata is missing the global is left NULL on the error return.
	 */
	hw_fence_drv_data = dev_get_drvdata(&pdev->dev);
	if (!hw_fence_drv_data) {
		HWFNC_ERR("null driver data\n");
		return -EINVAL;
	}

	dev_set_drvdata(&pdev->dev, NULL);
	kfree(hw_fence_drv_data);
	hw_fence_drv_data = (void *) -EPROBE_DEFER;

	HWFNC_DBG_H("-\n");
	return 0;
}
/* Device-tree match: the driver binds only to qcom,msm-hw-fence nodes */
static const struct of_device_id msm_hw_fence_dt_match[] = {
	{.compatible = "qcom,msm-hw-fence"},
	{}
};

/* Platform-driver glue for the hw-fence device */
static struct platform_driver msm_hw_fence_driver = {
	.probe = msm_hw_fence_probe,
	.remove = msm_hw_fence_remove,
	.driver = {
		.name = "msm-hw-fence",
		.of_match_table = of_match_ptr(msm_hw_fence_dt_match),
	},
};
/* Module entry point: register the platform driver. */
static int __init msm_hw_fence_init(void)
{
	int rc;

	HWFNC_DBG_H("+\n");

	rc = platform_driver_register(&msm_hw_fence_driver);
	if (rc) {
		HWFNC_ERR("%s: failed to register platform driver\n",
			__func__);
		return rc;
	}

	HWFNC_DBG_H("-\n");
	return 0;
}
/* Module exit point: unregister the platform driver. */
static void __exit msm_hw_fence_exit(void)
{
	HWFNC_DBG_H("+\n");
	platform_driver_unregister(&msm_hw_fence_driver);
	HWFNC_DBG_H("-\n");
}

/* "enable" module parameter: hw-fence support is opt-in (e.g. from cmdline) */
module_param_named(enable, hw_fence_driver_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Enable hardware fences");

module_init(msm_hw_fence_init);
module_exit(msm_hw_fence_exit);
MODULE_DESCRIPTION("QTI HW Fence Driver");
MODULE_LICENSE("GPL v2");

26
mm_driver_board.mk Normale Datei
Datei anzeigen

@@ -0,0 +1,26 @@
#SPDX-License-Identifier: GPL-2.0-only

# Whether the multimedia-driver DLKMs get installed into the vendor images.
MM_DRV_DLKM_ENABLE := true
# A global DLKM disable turns mm-drivers off too, unless the mm-driver
# specific override explicitly keeps them on.
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif

ifeq ($(MM_DRV_DLKM_ENABLE), true)
ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
# msm_ext_display ships on every supported (non-auto) platform, in the
# vendor image and both ramdisk module lists.
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
# The fence drivers are not built for taro.
ifneq ($(TARGET_BOARD_PLATFORM), taro)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
	$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
	$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \
	$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
endif
endif
endif
endif

17
mm_driver_product.mk Normale Datei
Datei anzeigen

@@ -0,0 +1,17 @@
# msm_ext_display is packaged unconditionally.
PRODUCT_PACKAGES += msm_ext_display.ko

# Mirror of the DLKM-enable logic in mm_driver_board.mk: a global DLKM
# disable applies unless the mm-driver override keeps the modules on.
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif

# The fence drivers are not built for taro.
ifeq ($(MM_DRV_DLKM_ENABLE), true)
ifneq ($(TARGET_BOARD_PLATFORM), taro)
PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko
endif
endif

# Full module list consumed elsewhere in the display build.
DISPLAY_MM_DRIVER := msm_ext_display.ko sync_fence.ko msm_hw_fence.ko

95
mm_drivers_kernel_headers.py Normale Datei
Datei anzeigen

@@ -0,0 +1,95 @@
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import filecmp
import os
import re
import subprocess
import sys
def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
    """Run the kernel headers_install script on a single header.

    Args:
        verbose: when True, echo the command being executed.
        gen_dir: root output directory for the sanitized headers.
        headers_install: path to the headers_install.sh tool.
        unifdef: path to the unifdef binary used by headers_install.
        prefix: path prefix that must lead every input header; it is
            stripped from h to compute the output path under gen_dir.
        h: path of the header to process.

    Returns:
        True on success, False on a prefix mismatch or tool failure.
    """
    if not h.startswith(prefix):
        print('error: expected prefix [%s] on header [%s]' % (prefix, h))
        return False

    out_h = os.path.join(gen_dir, h[len(prefix):])
    (out_h_dirname, out_h_basename) = os.path.split(out_h)
    env = os.environ.copy()
    env["LOC_UNIFDEF"] = unifdef
    cmd = ["sh", headers_install, h, out_h]

    # Honor the --verbose flag; the original hard-coded `if True:` and
    # printed the command unconditionally.
    if verbose:
        print('run_headers_install: cmd is %s' % cmd)

    result = subprocess.call(cmd, env=env)
    if result != 0:
        print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
        return False
    return True
def gen_mm_drivers_headers(verbose, gen_dir, headers_install, unifdef, mm_drivers_include_uapi):
    """Install every listed uapi header; return the number of failures."""
    failures = 0
    for header in mm_drivers_include_uapi:
        # The install prefix is everything up to and including
        # "sync_fence/include/uapi/".
        root = header.split('sync_fence/include/uapi')[0]
        prefix = os.path.join(root, 'sync_fence', 'include', 'uapi') + os.sep
        ok = run_headers_install(verbose, gen_dir, headers_install,
                                 unifdef, prefix, header)
        if not ok:
            failures += 1
    return failures
def main():
    """Parse command line arguments and perform top level control."""
    cli = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Arguments that apply to every invocation of this script.
    cli.add_argument(
        '--verbose', action='store_true',
        help='Print output that describes the workings of this script.')
    cli.add_argument(
        '--header_arch', required=True,
        help='The arch for which to generate headers.')
    cli.add_argument(
        '--gen_dir', required=True,
        help='Where to place the generated files.')
    cli.add_argument(
        '--mm_drivers_include_uapi', required=True, nargs='*',
        help='The list of techpack/*/include/uapi header files.')
    cli.add_argument(
        '--headers_install', required=True,
        help='The headers_install tool to process input headers.')
    cli.add_argument(
        '--unifdef',
        required=True,
        help='The unifdef tool used by headers_install.')

    opts = cli.parse_args()

    if opts.verbose:
        # Echo every option for debugging (same output as before).
        for label, value in (
                ('header_arch', opts.header_arch),
                ('gen_dir', opts.gen_dir),
                ('mm_drivers_include_uapi', opts.mm_drivers_include_uapi),
                ('headers_install', opts.headers_install),
                ('unifdef', opts.unifdef)):
            print('%s [%s]' % (label, value))

    return gen_mm_drivers_headers(opts.verbose, opts.gen_dir,
            opts.headers_install, opts.unifdef, opts.mm_drivers_include_uapi)

if __name__ == '__main__':
    sys.exit(main())

42
msm_ext_display/Android.mk Normale Datei
Datei anzeigen

@@ -0,0 +1,42 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

# This makefile is only for DLKM: build only when compiled from a
# vendor/.../opensource tree.
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
MSM_EXT_DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/msm_ext_display
endif # opensource

DLKM_DIR := $(TOP)/device/qcom/common/dlkm

LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)

###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR)
KBUILD_OPTIONS += MODNAME=msm_ext_display
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
# Module 1: publish Module.symvers so dependent DLKMs can link
# against symbols exported by this module.
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm-ext-disp-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
# Module 2: the msm_ext_display.ko kernel module itself.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm_ext_display.ko
LOCAL_MODULE_KBUILD_NAME := msm_ext_display.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

10
msm_ext_display/Kbuild Normale Datei
Datei anzeigen

@@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only

# Pull in the target configuration and force-included defines for the
# kalama mm-drivers build.
include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdriversconf.h

obj-m += msm_ext_display.o
msm_ext_display-y := src/msm_ext_display.o

# Embed the (UTC) build time into the module.
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"

15
msm_ext_display/Makefile Normale Datei
Datei anzeigen

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0

# Out-of-tree kernel module wrapper: every real goal is delegated to the
# kernel build system via the match-anything rule below.
KBUILD_OPTIONS += MSM_EXT_DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../

# These goals are commands, not files; declare them phony so a stray
# file with the same name cannot make them appear "up to date".
.PHONY: all modules_install clean

all: modules

modules_install:
	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install

# Forward any other goal (modules, help, ...) to the kernel tree.
%:
	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)

clean:
	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
	rm -rf .tmp_versions

Datei anzeigen

@@ -0,0 +1,702 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/iopoll.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/extcon-provider.h>
#include <linux/soc/qcom/msm_ext_display.h>
#include <linux/extcon-provider.h>
/* List node tying one registered display interface into display_list. */
struct msm_ext_disp_list {
	struct msm_ext_disp_init_data *data;	/* interface-provided init data */
	struct list_head list;			/* entry in msm_ext_disp::display_list */
};

/* Driver state for the external-display audio switch device. */
struct msm_ext_disp {
	struct msm_ext_disp_data ext_disp_data;	/* drvdata handed to interfaces */
	struct platform_device *pdev;
	struct msm_ext_disp_codec_id current_codec;	/* codec currently routed */
	struct msm_ext_disp_audio_codec_ops *ops;	/* registered audio codec ops (NULL if none) */
	struct extcon_dev *audio_sdev[MSM_EXT_DISP_MAX_CODECS];	/* one extcon dev per stream */
	bool audio_session_on;
	struct list_head display_list;	/* registered interfaces (msm_ext_disp_list) */
	struct mutex lock;		/* serializes codec/interface state changes */
	bool update_audio;		/* codec selection deferred until registration */
};

/* Cable types reported to userspace through extcon. */
static const unsigned int msm_ext_disp_supported_cable[] = {
	EXTCON_DISP_DP,
	EXTCON_DISP_HDMI,
	EXTCON_NONE,
};
/* Allocate and register the extcon device for stream @id. */
static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp, int id)
{
	struct extcon_dev *edev;
	int rc;

	if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
		pr_err("invalid params\n");
		return -EINVAL;
	}

	edev = devm_extcon_dev_allocate(&ext_disp->pdev->dev,
			msm_ext_disp_supported_cable);
	ext_disp->audio_sdev[id] = edev;
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	rc = devm_extcon_dev_register(&ext_disp->pdev->dev, edev);
	if (rc) {
		pr_err("audio registration failed\n");
		return rc;
	}

	pr_debug("extcon registration done\n");

	return rc;
}
static void msm_ext_disp_extcon_unregister(struct msm_ext_disp *ext_disp,
int id)
{
if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("Invalid params\n");
return;
}
devm_extcon_dev_unregister(&ext_disp->pdev->dev,
ext_disp->audio_sdev[id]);
}
/* Human-readable name of a display type, for log messages. */
static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
{
	if (type == EXT_DISPLAY_TYPE_HDMI)
		return "EXT_DISPLAY_TYPE_HDMI";
	if (type == EXT_DISPLAY_TYPE_DP)
		return "EXT_DISPLAY_TYPE_DP";

	return "???";
}
/* Track a newly registered display interface on display_list. */
static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
		struct msm_ext_disp_init_data *data)
{
	struct msm_ext_disp_list *entry;

	if (!ext_disp || !data) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->data = data;
	list_add(&entry->list, &ext_disp->display_list);

	pr_debug("Added new display (%s) ctld (%d) stream (%d)\n",
			msm_ext_disp_name(data->codec.type),
			data->codec.ctrl_id, data->codec.stream_id);

	return 0;
}
static int msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_init_data *data)
{
struct msm_ext_disp_list *node;
struct list_head *pos = NULL;
if (!ext_disp || !data) {
pr_err("Invalid params\n");
return -EINVAL;
}
list_for_each(pos, &ext_disp->display_list) {
node = list_entry(pos, struct msm_ext_disp_list, list);
if (node->data == data) {
list_del(pos);
pr_debug("Deleted the intf data\n");
kfree(node);
return 0;
}
}
pr_debug("Intf data not present for delete op\n");
return 0;
}
static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_codec_id *codec,
struct msm_ext_disp_init_data **data)
{
int ret = 0;
struct msm_ext_disp_list *node;
struct list_head *position = NULL;
if (!ext_disp || !data || !codec) {
pr_err("Invalid params\n");
ret = -EINVAL;
goto end;
}
*data = NULL;
list_for_each(position, &ext_disp->display_list) {
node = list_entry(position, struct msm_ext_disp_list, list);
if (node->data->codec.type == codec->type &&
node->data->codec.stream_id == codec->stream_id &&
node->data->codec.ctrl_id == codec->ctrl_id) {
*data = node->data;
break;
}
}
if (!*data)
ret = -ENODEV;
end:
return ret;
}
/*
 * Push a cable state change to userspace through extcon.
 *
 * Caller must hold ext_disp->lock.  Returns -EPERM when no codec is
 * registered and -EEXIST when the cable is already in @new_state.
 */
static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
		struct msm_ext_disp_codec_id *codec,
		enum msm_ext_disp_cable_state new_state)
{
	int ret = 0;
	int state;
	struct extcon_dev *audio_sdev;

	if (!ext_disp->ops) {
		pr_err("codec not registered, skip notification\n");
		ret = -EPERM;
		goto end;
	}

	audio_sdev = ext_disp->audio_sdev[codec->stream_id];

	state = extcon_get_state(audio_sdev, codec->type);
	if (state == !!new_state) {
		/* already in the requested state: nothing to notify */
		ret = -EEXIST;
		pr_debug("same state\n");
		goto end;
	}

	ret = extcon_set_state_sync(audio_sdev,
			codec->type, !!new_state);
	if (ret)
		pr_err("Failed to set state. Error = %d\n", ret);
	else
		pr_debug("state changed to %d\n", new_state);

end:
	return ret;
}
static struct msm_ext_disp *msm_ext_disp_validate_and_get(
struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state)
{
struct msm_ext_disp_data *ext_disp_data;
struct msm_ext_disp *ext_disp;
if (!pdev) {
pr_err("invalid platform device\n");
goto err;
}
if (!codec ||
codec->type >= EXT_DISPLAY_TYPE_MAX ||
codec->ctrl_id != 0 ||
codec->stream_id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("invalid display codec id\n");
goto err;
}
if (state < EXT_DISPLAY_CABLE_DISCONNECT ||
state >= EXT_DISPLAY_CABLE_STATE_MAX) {
pr_err("invalid HPD state (%d)\n", state);
goto err;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("invalid drvdata\n");
goto err;
}
ext_disp = container_of(ext_disp_data,
struct msm_ext_disp, ext_disp_data);
return ext_disp;
err:
return ERR_PTR(-EINVAL);
}
/*
 * Copy the interface's codec ops into the registered ops slot and make
 * @codec the current codec.  Caller must hold ext_disp->lock.
 */
static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
		struct msm_ext_disp_codec_id *codec)
{
	int ret = 0;
	struct msm_ext_disp_init_data *data = NULL;

	ret = msm_ext_disp_get_intf_data(ext_disp, codec, &data);
	if (ret || !data) {
		pr_err("Display not found (%s) ctld (%d) stream (%d)\n",
				msm_ext_disp_name(codec->type),
				codec->ctrl_id, codec->stream_id);
		goto end;
	}

	if (ext_disp->ops) {
		*ext_disp->ops = data->codec_ops;
		ext_disp->current_codec = *codec;

		/* update pdev for interface to use */
		ext_disp->ext_disp_data.intf_pdev = data->pdev;
		ext_disp->ext_disp_data.intf_data = data->intf_data;
	}

end:
	return ret;
}
/*
 * intf_ops.audio_config callback: on connect, select the matching audio
 * codec; on disconnect, clear the registered codec ops and mark no
 * codec as current.
 */
static int msm_ext_disp_audio_config(struct platform_device *pdev,
		struct msm_ext_disp_codec_id *codec,
		enum msm_ext_disp_cable_state state)
{
	int ret = 0;
	struct msm_ext_disp *ext_disp;

	ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
	if (IS_ERR(ext_disp)) {
		ret = PTR_ERR(ext_disp);
		goto end;
	}

	if (state == EXT_DISPLAY_CABLE_CONNECT) {
		/* takes ext_disp->lock internally */
		ret = msm_ext_disp_select_audio_codec(pdev, codec);
	} else {
		mutex_lock(&ext_disp->lock);
		if (ext_disp->ops)
			memset(ext_disp->ops, 0, sizeof(*ext_disp->ops));

		pr_debug("codec ops cleared for %s\n",
				msm_ext_disp_name(ext_disp->current_codec.type));

		/* EXT_DISPLAY_TYPE_MAX == no codec currently selected */
		ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
		mutex_unlock(&ext_disp->lock);
	}
end:
	return ret;
}
/* intf_ops.audio_notify callback: forward a cable event to extcon. */
static int msm_ext_disp_audio_notify(struct platform_device *pdev,
		struct msm_ext_disp_codec_id *codec,
		enum msm_ext_disp_cable_state state)
{
	struct msm_ext_disp *ext_disp;
	int rc;

	ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
	if (IS_ERR(ext_disp))
		return PTR_ERR(ext_disp);

	mutex_lock(&ext_disp->lock);
	rc = msm_ext_disp_process_audio(ext_disp, codec, state);
	mutex_unlock(&ext_disp->lock);

	return rc;
}
/*
 * Refresh the registered codec ops from the current codec's interface
 * and invoke its ready() callback.
 */
static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp)
{
	int ret;
	struct msm_ext_disp_init_data *data = NULL;

	if (!ext_disp) {
		pr_err("invalid input\n");
		return;
	}

	ret = msm_ext_disp_get_intf_data(ext_disp,
			&ext_disp->current_codec, &data);
	if (ret) {
		pr_err("%s not found\n",
				msm_ext_disp_name(ext_disp->current_codec.type));
		return;
	}

	/*
	 * NOTE(review): ext_disp->ops is dereferenced without a NULL check;
	 * the only caller invokes this right after codec registration, so
	 * ops should be set — confirm no other path can reach here.
	 */
	*ext_disp->ops = data->codec_ops;
	data->codec_ops.ready(ext_disp->pdev);
}
/* Legacy HDMI entry point: forwards to the generic codec registration. */
int msm_hdmi_register_audio_codec(struct platform_device *pdev,
		struct msm_ext_disp_audio_codec_ops *ops)
{
	return msm_ext_disp_register_audio_codec(pdev, ops);
}
/**
 * msm_ext_disp_register_audio_codec() - register audio codec ops
 * @pdev: platform device of the msm-ext-disp node
 * @ops: audio codec operations to register
 *
 * Register audio codec ops with the display driver for HDMI/Display
 * Port usecase support.  Only one codec may be registered at a time.
 * If a codec selection was deferred (update_audio set by
 * msm_ext_disp_select_audio_codec() before registration), the selection
 * and the connect notification are replayed here.
 *
 * Return: 0 on success, negative value on error
 */
int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
		struct msm_ext_disp_audio_codec_ops *ops)
{
	int ret = 0;
	struct msm_ext_disp *ext_disp = NULL;
	struct msm_ext_disp_data *ext_disp_data = NULL;

	if (!pdev || !ops) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	ext_disp_data = platform_get_drvdata(pdev);
	if (!ext_disp_data) {
		pr_err("Invalid drvdata\n");
		return -EINVAL;
	}

	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
				ext_disp_data);

	mutex_lock(&ext_disp->lock);

	if (ext_disp->ops) {
		pr_err("Codec already registered\n");
		ret = -EINVAL;
		goto end;
	}

	ext_disp->ops = ops;

	pr_debug("audio codec registered\n");

	/* replay a codec selection that arrived before registration */
	if (ext_disp->update_audio) {
		ext_disp->update_audio = false;
		msm_ext_disp_update_audio_ops(ext_disp, &ext_disp->current_codec);
		msm_ext_disp_process_audio(ext_disp, &ext_disp->current_codec,
				EXT_DISPLAY_CABLE_CONNECT);
	}

end:
	mutex_unlock(&ext_disp->lock);
	/* ready() is called outside the lock, only with a codec selected */
	if (ext_disp->current_codec.type != EXT_DISPLAY_TYPE_MAX)
		msm_ext_disp_ready_for_display(ext_disp);

	return ret;
}
EXPORT_SYMBOL(msm_ext_disp_register_audio_codec);
/*
 * Select which display interface's codec ops drive the audio codec.
 * If no codec is registered yet, the selection is recorded and deferred
 * (update_audio) until msm_ext_disp_register_audio_codec() runs.
 */
int msm_ext_disp_select_audio_codec(struct platform_device *pdev,
		struct msm_ext_disp_codec_id *codec)
{
	int ret = 0;
	struct msm_ext_disp *ext_disp = NULL;
	struct msm_ext_disp_data *ext_disp_data = NULL;

	if (!pdev || !codec) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	ext_disp_data = platform_get_drvdata(pdev);
	if (!ext_disp_data) {
		pr_err("Invalid drvdata\n");
		return -EINVAL;
	}

	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
				ext_disp_data);

	mutex_lock(&ext_disp->lock);

	if (!ext_disp->ops) {
		pr_warn("Codec is not registered\n");
		/* remember the request; replayed on codec registration */
		ext_disp->update_audio = true;
		ext_disp->current_codec = *codec;
		ret = -EINVAL;
		goto end;
	}

	ret = msm_ext_disp_update_audio_ops(ext_disp, codec);

end:
	mutex_unlock(&ext_disp->lock);

	return ret;
}
EXPORT_SYMBOL(msm_ext_disp_select_audio_codec);
static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
{
struct msm_ext_disp_audio_codec_ops *ops;
if (!init_data) {
pr_err("Invalid init_data\n");
return -EINVAL;
}
if (!init_data->pdev) {
pr_err("Invalid display intf pdev\n");
return -EINVAL;
}
if (init_data->codec.type >= EXT_DISPLAY_TYPE_MAX ||
init_data->codec.ctrl_id != 0 ||
init_data->codec.stream_id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("Invalid codec info type(%d), ctrl(%d) stream(%d)\n",
init_data->codec.type,
init_data->codec.ctrl_id,
init_data->codec.stream_id);
return -EINVAL;
}
ops = &init_data->codec_ops;
if (!ops->audio_info_setup || !ops->get_audio_edid_blk ||
!ops->cable_status || !ops->get_intf_id ||
!ops->teardown_done || !ops->acknowledge ||
!ops->ready) {
pr_err("Invalid codec operation pointers\n");
return -EINVAL;
}
return 0;
}
/*
 * Register a display interface (HDMI/DP stream) with the audio switch.
 * Rejects duplicate codec ids; on success fills in the interface's
 * audio_config/audio_notify callbacks.
 */
int msm_ext_disp_register_intf(struct platform_device *pdev,
		struct msm_ext_disp_init_data *init_data)
{
	int ret = 0;
	struct msm_ext_disp_init_data *data = NULL;
	struct msm_ext_disp *ext_disp = NULL;
	struct msm_ext_disp_data *ext_disp_data = NULL;

	if (!pdev || !init_data) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	ext_disp_data = platform_get_drvdata(pdev);
	if (!ext_disp_data) {
		pr_err("Invalid drvdata\n");
		return -EINVAL;
	}

	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
				ext_disp_data);

	mutex_lock(&ext_disp->lock);

	ret = msm_ext_disp_validate_intf(init_data);
	if (ret)
		goto end;

	/* a successful lookup means this codec id is already taken */
	ret = msm_ext_disp_get_intf_data(ext_disp, &init_data->codec, &data);
	if (!ret) {
		pr_err("%s already registered. ctrl(%d) stream(%d)\n",
				msm_ext_disp_name(init_data->codec.type),
				init_data->codec.ctrl_id,
				init_data->codec.stream_id);
		goto end;
	}

	ret = msm_ext_disp_add_intf_data(ext_disp, init_data);
	if (ret)
		goto end;

	init_data->intf_ops.audio_config = msm_ext_disp_audio_config;
	init_data->intf_ops.audio_notify = msm_ext_disp_audio_notify;

	pr_debug("%s registered. ctrl(%d) stream(%d)\n",
			msm_ext_disp_name(init_data->codec.type),
			init_data->codec.ctrl_id,
			init_data->codec.stream_id);

end:
	mutex_unlock(&ext_disp->lock);
	return ret;
}
EXPORT_SYMBOL(msm_ext_disp_register_intf);
/* Deregister a display interface and clear its audio callbacks. */
int msm_ext_disp_deregister_intf(struct platform_device *pdev,
		struct msm_ext_disp_init_data *init_data)
{
	int ret = 0;
	struct msm_ext_disp *ext_disp = NULL;
	struct msm_ext_disp_data *ext_disp_data = NULL;

	if (!pdev || !init_data) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	ext_disp_data = platform_get_drvdata(pdev);
	if (!ext_disp_data) {
		pr_err("Invalid drvdata\n");
		return -EINVAL;
	}

	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
				ext_disp_data);

	mutex_lock(&ext_disp->lock);

	ret = msm_ext_disp_remove_intf_data(ext_disp, init_data);
	if (ret)
		goto end;

	init_data->intf_ops.audio_config = NULL;
	init_data->intf_ops.audio_notify = NULL;

	pr_debug("%s deregistered\n",
			msm_ext_disp_name(init_data->codec.type));

end:
	mutex_unlock(&ext_disp->lock);
	return ret;
}
EXPORT_SYMBOL(msm_ext_disp_deregister_intf);
/*
 * Probe: allocate driver state, register one extcon device per stream,
 * then populate child devices from the device tree.
 *
 * Fixes vs. original:
 *  - the mutex, display list and codec defaults are now initialized
 *    BEFORE of_platform_populate(): children probed there may call
 *    msm_ext_disp_register_intf(), which takes ext_disp->lock.
 *  - the error path unregisters only the extcon devices that were
 *    actually registered, instead of all MSM_EXT_DISP_MAX_CODECS slots.
 */
static int msm_ext_disp_probe(struct platform_device *pdev)
{
	int ret = 0, id;
	struct device_node *of_node = NULL;
	struct msm_ext_disp *ext_disp = NULL;

	if (!pdev) {
		pr_err("No platform device found\n");
		ret = -ENODEV;
		goto end;
	}

	of_node = pdev->dev.of_node;
	if (!of_node) {
		pr_err("No device node found\n");
		ret = -ENODEV;
		goto end;
	}

	ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL);
	if (!ext_disp) {
		ret = -ENOMEM;
		goto end;
	}

	platform_set_drvdata(pdev, &ext_disp->ext_disp_data);
	ext_disp->pdev = pdev;

	/* must be ready before any child can call back into this driver */
	mutex_init(&ext_disp->lock);
	INIT_LIST_HEAD(&ext_disp->display_list);
	ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
	ext_disp->update_audio = false;

	for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) {
		ret = msm_ext_disp_extcon_register(ext_disp, id);
		if (ret)
			goto extcon_failure;
	}

	ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
	if (ret) {
		pr_err("Failed to add child devices. Error = %d\n", ret);
		id = MSM_EXT_DISP_MAX_CODECS;	/* all extcons registered */
		goto extcon_failure;
	}
	pr_debug("%s: Added child devices.\n", __func__);

	return ret;

extcon_failure:
	/* unwind only the extcon devices registered so far */
	while (--id >= 0)
		msm_ext_disp_extcon_unregister(ext_disp, id);
	mutex_destroy(&ext_disp->lock);
	devm_kfree(&ext_disp->pdev->dev, ext_disp);
end:
	return ret;
}
/* Remove: unregister every extcon device and free driver state. */
static int msm_ext_disp_remove(struct platform_device *pdev)
{
	int ret = 0, id;
	struct msm_ext_disp *ext_disp = NULL;
	struct msm_ext_disp_data *ext_disp_data = NULL;

	if (!pdev) {
		pr_err("No platform device\n");
		ret = -ENODEV;
		goto end;
	}

	ext_disp_data = platform_get_drvdata(pdev);
	if (!ext_disp_data) {
		pr_err("No drvdata found\n");
		ret = -ENODEV;
		goto end;
	}

	ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
				ext_disp_data);

	for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++)
		msm_ext_disp_extcon_unregister(ext_disp, id);

	mutex_destroy(&ext_disp->lock);
	devm_kfree(&ext_disp->pdev->dev, ext_disp);

end:
	return ret;
}
/* Device-tree match table: binds to "qcom,msm-ext-disp" nodes. */
static const struct of_device_id msm_ext_dt_match[] = {
	{.compatible = "qcom,msm-ext-disp",},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, msm_ext_dt_match);

/* Platform driver glue for the external display audio switch. */
static struct platform_driver this_driver = {
	.probe = msm_ext_disp_probe,
	.remove = msm_ext_disp_remove,
	.driver = {
		.name = "msm-ext-disp",
		.of_match_table = msm_ext_dt_match,
	},
};
/* Module entry: register the platform driver early (subsys initcall). */
static int __init msm_ext_disp_init(void)
{
	int rc = platform_driver_register(&this_driver);

	if (rc)
		pr_err("failed, ret = %d\n", rc);

	return rc;
}

subsys_initcall(msm_ext_disp_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM External Display");

41
sync_fence/Android.mk Normale Datei
Datei anzeigen

@@ -0,0 +1,41 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

# This makefile is only for DLKM: build only when compiled from a
# vendor/.../opensource tree.
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence
endif # opensource

DLKM_DIR := $(TOP)/device/qcom/common/dlkm

LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)

###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR)
KBUILD_OPTIONS += MODNAME=sync_fence
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
# Module 1: publish Module.symvers so dependent DLKMs can link
# against symbols exported by this module.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync-fence-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
# Module 2: the sync_fence.ko kernel module itself.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync_fence.ko
LOCAL_MODULE_KBUILD_NAME := sync_fence.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

14
sync_fence/Kbuild Normale Datei
Datei anzeigen

@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only

KDIR := $(TOP)/kernel_platform/msm-kernel

# Export this module's uapi/include headers to the build.
LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/

# Pull in the target configuration and force-included defines for the
# kalama mm-drivers build.
include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h

# Only build when the speculative-sync feature is enabled in the config.
ifdef CONFIG_QCOM_SPEC_SYNC
obj-m += sync_fence.o
sync_fence-y := src/qcom_sync_file.o

# Embed the (UTC) build time into the module.
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
endif

15
sync_fence/Makefile Normale Datei
Datei anzeigen

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0

# Out-of-tree kernel module wrapper: every real goal is delegated to the
# kernel build system via the match-anything rule below.
KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../

# These goals are commands, not files; declare them phony so a stray
# file with the same name cannot make them appear "up to date".
.PHONY: all modules_install clean

all: modules

modules_install:
	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install

# Forward any other goal (modules, help, ...) to the kernel tree.
%:
	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)

clean:
	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
	rm -rf .tmp_versions

Datei anzeigen

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
# Top-level Makefile calls into asm-$(ARCH)
# List only non-arch directories below

# Export the sync_fence uapi headers for headers_install.
header-y += sync_fence/

Datei anzeigen

@@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 */

#ifndef _UAPI_LINUX_SPEC_SYNC_H
#define _UAPI_LINUX_SPEC_SYNC_H

#include <linux/ioctl.h>
#include <linux/types.h>

/* Signalling policy flags for a speculative fence array */
#define SPEC_FENCE_SIGNAL_ANY 0x1	/* signal when any child fence signals */
#define SPEC_FENCE_SIGNAL_ALL 0x2	/* signal when all child fences signal */

/**
 * struct fence_bind_data - data passed to bind ioctl
 * @out_bind_fd: fd of the speculative fence array to bind children into
 * @fds: user pointer to an array of child fence file descriptors
 */
struct fence_bind_data {
	__u32 out_bind_fd;
	__u64 fds;
};

/**
 * struct fence_create_data - detailed fence information
 * @num_fences: Total fences that array needs to carry.
 * @flags: Flags specifying on how to signal the array
 *         (SPEC_FENCE_SIGNAL_ANY / SPEC_FENCE_SIGNAL_ALL)
 * @out_bind_fd: Returns the fence fd.
 */
struct fence_create_data {
	__u32 num_fences;
	__u32 flags;
	__u32 out_bind_fd;
};

#define SPEC_SYNC_MAGIC '>'

/**
 * DOC: SPEC_SYNC_IOC_BIND - bind two fences
 *
 * Takes a struct fence_bind_data. Binds the child fds with the fence array
 * referenced by fence_bind_data.out_bind_fd.
 */
#define SPEC_SYNC_IOC_BIND _IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data)

/**
 * DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array
 *
 * Takes a struct fence_create_data. If num_fences is > 0, fence array will be
 * created and its fd returned in fence_create_data.out_bind_fd.
 */
#define SPEC_SYNC_IOC_CREATE_FENCE _IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data)

/**
 * DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version
 *
 * Returns Spec driver version.
 */
#define SPEC_SYNC_IOC_GET_VER _IOWR(SPEC_SYNC_MAGIC, 5, __u64)

#endif /* _UAPI_LINUX_SPEC_SYNC_H */

Datei anzeigen

@@ -0,0 +1,514 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/sync_file.h>
#include <uapi/sync_fence/qcom_sync_file.h>
#include <linux/soc/qcom/qcom_sync_file.h>
#define CLASS_NAME "sync"
#define DRV_NAME "spec_sync"
#define DRV_VERSION 1
#define NAME_LEN 32

/* Bounds for fence_create_data.num_fences */
#define FENCE_MIN 1
#define FENCE_MAX 32

/* Driver-wide state for the speculative sync char device. */
struct sync_device {
	/* device info */
	struct class *dev_class;
	dev_t dev_num;
	struct device *dev;
	struct cdev *cdev;
	struct mutex lock;		/* serializes open/release accounting */

	/* device drv data */
	atomic_t device_available;	/* number of concurrent opens (max 2) */
	char name[NAME_LEN];		/* task name of the first opener */
	uint32_t version;		/* reported by SPEC_SYNC_IOC_GET_VER */
	struct mutex l_lock;		/* protects fence_array_list */
	struct list_head fence_array_list;	/* tracked fence arrays */
	wait_queue_head_t wait_queue;	/* woken when an array gets bound */
};

/* One tracked speculative fence array. */
struct fence_array_node {
	struct dma_fence_array *fence_array;
	struct list_head list;
};

/* Speculative Sync Device Driver State */
static struct sync_device sync_dev;
/*
 * Check whether @fence is a fence array this driver created and still
 * tracks on sync_dev.fence_array_list.
 *
 * Return: true when found, false otherwise.
 */
static bool sanitize_fence_array(struct dma_fence_array *fence)
{
	struct fence_array_node *node;
	/* fix: was "int ret = false;" — use bool to match the return type */
	bool found = false;

	mutex_lock(&sync_dev.l_lock);
	list_for_each_entry(node, &sync_dev.fence_array_list, list) {
		if (node->fence_array == fence) {
			found = true;
			break;
		}
	}
	mutex_unlock(&sync_dev.l_lock);

	return found;
}
/*
 * Walk the tracked fence arrays and drop entries that are signalled.
 * With @force_clear, unbound arrays are emptied and force-signalled,
 * and every entry is released regardless of state (used on last close).
 */
static void clear_fence_array_tracker(bool force_clear)
{
	struct fence_array_node *node, *temp;
	struct dma_fence_array *array;
	struct dma_fence *fence;
	bool is_signaled;

	mutex_lock(&sync_dev.l_lock);
	list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) {
		array = node->fence_array;
		fence = &array->base;
		is_signaled = dma_fence_is_signaled(fence);
		/* unbound array: no child fences were ever attached */
		if (force_clear && !array->fences)
			array->num_fences = 0;

		pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled,
				atomic_read(&array->num_pending));

		if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending))
			dma_fence_signal(fence);

		if (force_clear || is_signaled) {
			/* drop the tracker's reference and the node itself */
			dma_fence_put(fence);
			list_del(&node->list);
			kfree(node);
		}
	}
	mutex_unlock(&sync_dev.l_lock);
}
/*
 * Account one more open of the device (max two concurrent opens); the
 * first opener's name is recorded.  Caller holds sync_dev.lock.
 * Returns NULL when the open limit is reached.
 */
static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name)
{
	int opened = atomic_read(&obj->device_available);

	if (opened > 1) {
		pr_err("number of device fds are limited by 2, device opened:%d\n",
				atomic_read(&obj->device_available));
		return NULL;
	}

	if (opened == 0) {
		/* first opener: remember who owns the device */
		memset(obj->name, 0, NAME_LEN);
		strscpy(obj->name, name, sizeof(obj->name));
	}

	atomic_inc(&obj->device_available);

	return obj;
}
/*
 * open() handler: at most two concurrent opens are allowed (enforced by
 * spec_fence_init_locked()); returns -EEXIST past that limit.
 */
static int spec_sync_open(struct inode *inode, struct file *file)
{
	char task_comm[TASK_COMM_LEN];
	struct sync_device *obj = &sync_dev;
	int ret = 0;

	if (!inode || !inode->i_cdev || !file) {
		pr_err("NULL pointer passed\n");
		return -EINVAL;
	}

	mutex_lock(&sync_dev.lock);

	get_task_comm(task_comm, current);

	obj = spec_fence_init_locked(obj, task_comm);
	if (!obj) {
		pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, task_comm);
		ret = -EEXIST;
		goto end;
	}

	file->private_data = obj;

end:
	mutex_unlock(&sync_dev.lock);
	return ret;
}
/*
 * release() handler: drop one open count; when the last fd closes,
 * force-clear every tracked fence array.
 */
static int spec_sync_release(struct inode *inode, struct file *file)
{
	int ret = 0;
	struct sync_device *obj = file->private_data;

	mutex_lock(&sync_dev.lock);

	if (!atomic_read(&obj->device_available)) {
		pr_err("no device to release!!\n");
		ret = -ENODEV;
		goto end;
	}

	atomic_dec(&obj->device_available);
	/* last close: tear down all outstanding speculative fences */
	if (!atomic_read(&obj->device_available))
		clear_fence_array_tracker(true);

end:
	mutex_unlock(&sync_dev.lock);
	return ret;
}
/* SPEC_SYNC_IOC_GET_VER: copy the driver version out to userspace. */
static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg)
{
	uint32_t ver = obj->version;

	return copy_to_user((void __user *)arg, &ver, sizeof(uint32_t)) ? -EFAULT : 0;
}
/*
 * Create a speculative dma-fence array with @f->num_fences unbound
 * slots, wrap it in a sync_file, track it on fence_array_list and
 * return its fd.  Returns a negative errno on failure.
 */
static int spec_sync_create_array(struct fence_create_data *f)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	struct sync_file *sync_file;
	struct dma_fence_array *fence_array;
	struct fence_array_node *node;
	bool signal_any;
	int ret = 0;

	if (fd < 0) {
		pr_err("failed to get_unused_fd_flags\n");
		return fd;
	}

	if (f->num_fences < FENCE_MIN || f->num_fences > FENCE_MAX) {
		pr_err("invalid arguments num_fences:%d\n", f->num_fences);
		ret = -ERANGE;
		goto error_args;
	}

	signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? false : true;

	fence_array = dma_fence_array_create(f->num_fences, NULL,
				dma_fence_context_alloc(1), 0, signal_any);
	/* fix: allocation failure was unchecked and dereferenced below */
	if (!fence_array) {
		ret = -ENOMEM;
		goto error_args;
	}

	/* Set the enable signal such that signalling is not done during wait*/
	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags);
	set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags);

	sync_file = sync_file_create(&fence_array->base);
	if (!sync_file) {
		pr_err("sync_file_create fail\n");
		ret = -EINVAL;
		goto err;
	}

	node = kzalloc((sizeof(struct fence_array_node)), GFP_KERNEL);
	if (!node) {
		fput(sync_file->file);
		ret = -ENOMEM;
		goto err;
	}

	fd_install(fd, sync_file->file);

	/* track the array so bind/clear can find it later */
	node->fence_array = fence_array;
	mutex_lock(&sync_dev.l_lock);
	list_add_tail(&node->list, &sync_dev.fence_array_list);
	mutex_unlock(&sync_dev.l_lock);

	pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences);
	return fd;

err:
	dma_fence_put(&fence_array->base);
error_args:
	put_unused_fd(fd);
	return ret;
}
/* SPEC_SYNC_IOC_CREATE_FENCE: create an array and return its fd. */
static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg)
{
	struct fence_create_data f;
	int spec_fd;

	if (copy_from_user(&f, (void __user *)arg, sizeof(f)))
		return -EFAULT;

	spec_fd = spec_sync_create_array(&f);
	if (spec_fd < 0)
		return spec_fd;

	f.out_bind_fd = spec_fd;

	return copy_to_user((void __user *)arg, &f, sizeof(f)) ? -EFAULT : 0;
}
/*
 * Wait up to @timeout_ms for a speculative fence array to get its child
 * fences bound.  Returns 0 when already/now bound, -EINVAL for a fence
 * that is not a spec fence array, -ETIMEDOUT on timeout.
 */
int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms)
{
	int ret;

	/* Check if fence-array is a speculative fence */
	if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) {
		pr_err("invalid fence!\n");
		return -EINVAL;
	} else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) {
		/* This fence-array is already bound, just return success */
		return 0;
	}

	/* Wait for the fence-array bind */
	ret = wait_event_timeout(sync_dev.wait_queue,
		test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags),
		msecs_to_jiffies(timeout_ms));
	if (!ret) {
		pr_err("timed out waiting for bind fence-array %d\n", timeout_ms);
		ret = -ETIMEDOUT;
	} else {
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(spec_sync_wait_bind_array);
/*
 * Bind userspace-provided child fences into a previously created
 * speculative fence array (sync_bind_info->out_bind_fd).  On success the
 * array becomes a normal dma-fence array; on failure the array is torn
 * down and signalled with -EINVAL.  Either way, waiters on
 * spec_sync_wait_bind_array() are woken.
 *
 * Locking is asymmetric on purpose: fence->lock is taken before the
 * bind loop and released on the success path at the clear_bit below,
 * but a "goto bind_invalid" from inside the loop leaves it held until
 * the error branch's spin_unlock().
 */
static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info)
{
	struct dma_fence_array *fence_array;
	struct dma_fence *fence = NULL;
	struct dma_fence *user_fence = NULL;
	struct dma_fence **fence_list;
	int *user_fds, ret = 0, i;
	u32 num_fences, counter;

	fence = sync_file_get_fence(sync_bind_info->out_bind_fd);
	if (!fence) {
		pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd);
		return -EINVAL;
	}

	fence_array = container_of(fence, struct dma_fence_array, base);
	if (!sanitize_fence_array(fence_array)) {
		pr_err("spec fence not found in the registered list out_fd:%d\n",
				sync_bind_info->out_bind_fd);
		ret = -EINVAL;
		goto end;
	}

	if (fence_array->fences) {
		/* already bound: logged loudly but treated as success */
		pr_err("fence array already populated, spec fd:%d status:%d flags:0x%x\n",
				sync_bind_info->out_bind_fd, dma_fence_get_status(fence), fence->flags);
		goto end;
	}

	num_fences = fence_array->num_fences;
	counter = num_fences;

	user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL);
	if (!user_fds) {
		ret = -ENOMEM;
		goto end;
	}

	fence_list = kmalloc_array(num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO);
	if (!fence_list) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds,
			num_fences * sizeof(int))) {
		kfree(fence_list);
		ret = -EFAULT;
		goto out;
	}

	/* publish the backing fence list under the fence lock */
	spin_lock(fence->lock);
	fence_array->fences = fence_list;
	for (i = 0; i < num_fences; i++) {
		user_fence = sync_file_get_fence(user_fds[i]);
		if (!user_fence) {
			pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n",
					user_fds[i], sync_bind_info->out_bind_fd);
			/* i fences were referenced before the failure */
			counter = i;
			ret = -EINVAL;
			goto bind_invalid;	/* fence->lock still held */
		}
		fence_array->fences[i] = user_fence;
		pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd,
				i, user_fds[i], fence_array->fences[i]->error);
	}

	clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
	spin_unlock(fence->lock);

	dma_fence_enable_sw_signaling(&fence_array->base);

	clear_fence_array_tracker(false);

bind_invalid:
	set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags);
	wake_up_all(&sync_dev.wait_queue);

	if (ret) {
		/* error: drop taken refs, revert the array, signal error */
		for (i = counter - 1; i >= 0; i--)
			dma_fence_put(fence_array->fences[i]);

		kfree(fence_list);
		fence_array->fences = NULL;
		fence_array->num_fences = 0;

		dma_fence_set_error(fence, -EINVAL);
		spin_unlock(fence->lock);	/* held since the goto above */
		dma_fence_signal(fence);
		clear_fence_array_tracker(false);
	}
out:
	kfree(user_fds);
end:
	dma_fence_put(fence);
	return ret;
}
/*
 * Handle SPEC_SYNC_IOC_BIND: copy the bind descriptor from userspace,
 * sanity-check the output fd, and hand off to spec_sync_bind_array()
 * which binds the listed user fences into the speculative fence array.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL on a
 * negative out_bind_fd, or the result of spec_sync_bind_array().
 */
static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg)
{
	struct fence_bind_data bind_data;

	if (copy_from_user(&bind_data, (void __user *)arg, sizeof(bind_data)))
		return -EFAULT;

	if (bind_data.out_bind_fd < 0) {
		pr_err("Invalid out_fd:%d\n", bind_data.out_bind_fd);
		return -EINVAL;
	}

	return spec_sync_bind_array(&bind_data);
}
/*
 * Top-level unlocked_ioctl dispatcher for the spec_sync character
 * device: routes each supported command to its handler.
 *
 * Returns the handler's result, or -ENOTTY for unknown commands.
 */
static long spec_sync_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct sync_device *obj = file->private_data;

	switch (cmd) {
	case SPEC_SYNC_IOC_CREATE_FENCE:
		return spec_sync_ioctl_create_fence(obj, arg);
	case SPEC_SYNC_IOC_BIND:
		return spec_sync_ioctl_bind(obj, arg);
	case SPEC_SYNC_IOC_GET_VER:
		return spec_sync_ioctl_get_ver(obj, arg);
	default:
		return -ENOTTY;
	}
}
/*
 * File operations for the spec_sync character device; wired into the
 * cdev by spec_sync_register_device().  All userspace interaction goes
 * through the ioctl interface (create fence / bind / get version).
 */
const struct file_operations spec_sync_fops = {
.owner = THIS_MODULE,
.open = spec_sync_open,
.release = spec_sync_release,
.unlocked_ioctl = spec_sync_ioctl,
};
static int spec_sync_register_device(void)
{
int ret;
sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME);
if (sync_dev.dev_class == NULL) {
pr_err("%s: class_create fail.\n", __func__);
goto res_err;
}
ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME);
if (ret) {
pr_err("%s: alloc_chrdev_region fail.\n", __func__);
goto alloc_chrdev_region_err;
}
sync_dev.dev = device_create(sync_dev.dev_class, NULL,
sync_dev.dev_num,
&sync_dev, DRV_NAME);
if (IS_ERR(sync_dev.dev)) {
pr_err("%s: device_create fail.\n", __func__);
goto device_create_err;
}
sync_dev.cdev = cdev_alloc();
if (sync_dev.cdev == NULL) {
pr_err("%s: cdev_alloc fail.\n", __func__);
goto cdev_alloc_err;
}
cdev_init(sync_dev.cdev, &spec_sync_fops);
sync_dev.cdev->owner = THIS_MODULE;
ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1);
if (ret) {
pr_err("%s: cdev_add fail.\n", __func__);
goto cdev_add_err;
}
sync_dev.version = DRV_VERSION;
mutex_init(&sync_dev.lock);
mutex_init(&sync_dev.l_lock);
INIT_LIST_HEAD(&sync_dev.fence_array_list);
init_waitqueue_head(&sync_dev.wait_queue);
return 0;
cdev_add_err:
cdev_del(sync_dev.cdev);
cdev_alloc_err:
device_destroy(sync_dev.dev_class, sync_dev.dev_num);
device_create_err:
unregister_chrdev_region(sync_dev.dev_num, 1);
alloc_chrdev_region_err:
class_destroy(sync_dev.dev_class);
res_err:
return -ENODEV;
}
/*
 * Module init: register the speculative sync character device and log
 * a failure if registration does not succeed.
 */
static int __init spec_sync_init(void)
{
	int rc = spec_sync_register_device();

	if (rc)
		pr_err("%s: speculative sync driver register fail.\n", __func__);

	return rc;
}
/*
 * Module exit: tear down the character device in the reverse order of
 * spec_sync_register_device() — cdev, device node, chrdev region, then
 * the device class.  Order matters; do not reorder these calls.
 */
static void __exit spec_sync_deinit(void)
{
cdev_del(sync_dev.cdev);
device_destroy(sync_dev.dev_class, sync_dev.dev_num);
unregister_chrdev_region(sync_dev.dev_num, 1);
class_destroy(sync_dev.dev_class);
}
/* Module entry/exit hooks and metadata for the spec_sync driver. */
module_init(spec_sync_init);
module_exit(spec_sync_deinit);
MODULE_DESCRIPTION("QCOM Speculative Sync Driver");
MODULE_LICENSE("GPL v2");