Browse Source

Merge 2f76940f774c193f8cf52ea69ce0897f28d46fe5 on remote branch

Change-Id: If37bd7755f85e3cc1c32cf855786c84e7057a3c7
Linux Build Service Account 1 year ago
parent
commit
f8e1fb5877

+ 36 - 0
BUILD.bazel

@@ -0,0 +1,36 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+package(
+    default_visibility = [
+      "//visibility:public"],
+)
+
+ddk_headers(
+    name = "mm_drivers_configs",
+    hdrs  = glob([
+      "config/*.h"]),
+    includes = ["config"]
+)
+
+ddk_headers(
+    name = "hw_fence_headers",
+    hdrs = glob([
+      "hw_fence/include/*.h"]),
+    includes = ["hw_fence/include"]
+)
+
+ddk_headers(
+    name = "sync_fence_uapi_headers",
+    hdrs = glob([
+      "sync_fence/include/uapi/sync_fence/*.h",
+      "sync_fence/include/*.h"]),
+    includes = ["sync_fence/include"]
+)
+
+ddk_headers(
+    name = "mm_drivers_headers",
+    hdrs = [":mm_drivers_configs",  ":hw_fence_headers", ":sync_fence_uapi_headers"]
+)
+
+load(":target.bzl", "define_pineapple")
+define_pineapple()

+ 8 - 5
hw_fence/Kbuild

@@ -3,9 +3,7 @@
 KDIR := $(TOP)/kernel_platform/msm-kernel
 include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf
 LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \
-		-I$(MSM_HW_FENCE_ROOT)hw_fence/include/ \
-		-I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \
-		-I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/
+		-I$(MSM_HW_FENCE_ROOT)hw_fence/include/
 
 ifdef CONFIG_QTI_HW_FENCE
 obj-m += msm_hw_fence.o
@@ -14,8 +12,13 @@ msm_hw_fence-y := src/msm_hw_fence.o \
 		src/hw_fence_drv_priv.o \
 		src/hw_fence_drv_utils.o \
 		src/hw_fence_drv_debug.o \
-		src/hw_fence_drv_ipc.o \
-		src/msm_hw_fence_synx_translation.o
+		src/hw_fence_drv_ipc.o
+
+ifneq ($(CONFIG_ARCH_KALAMA), y)
+LINUXINCLUDE += -I$(MSM_HW_FENCE_ROOT)/../synx-kernel/msm/synx/ \
+		-I$(MSM_HW_FENCE_ROOT)/../synx-kernel/include/uapi/synx/media/
+msm_hw_fence-y += src/msm_hw_fence_synx_translation.o
+endif
 
 msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o
 

+ 1 - 7
hw_fence/include/hw_fence_drv_ipc.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef __HW_FENCE_DRV_IPC_H
@@ -36,14 +36,10 @@
 #define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17
 #define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18
 
-#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2
-#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1
 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2
 #define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2
 #define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4
 
-#define HW_FENCE_IPCC_HW_REV_100 0x00010000  /* Lahaina */
-#define HW_FENCE_IPCC_HW_REV_110 0x00010100  /* Waipio */
 #define HW_FENCE_IPCC_HW_REV_170 0x00010700  /* Kalama */
 #define HW_FENCE_IPCC_HW_REV_203 0x00020003  /* Pineapple */
 
@@ -73,7 +69,6 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
  */
 int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data);
 
-#ifdef HW_DPU_IPCC
 /**
  * hw_fence_ipcc_enable_dpu_signaling() - Enable ipcc signaling for dpu client.
  * @drv_data: driver data.
@@ -81,7 +76,6 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data);
  * Return: 0 on success or negative errno (-EINVAL)
  */
 int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data);
-#endif /* HW_DPU_IPCC */
 
 /**
  * hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the

+ 73 - 62
hw_fence/include/hw_fence_drv_priv.h

@@ -14,9 +14,6 @@
 #include <linux/dma-fence-array.h>
 #include <linux/slab.h>
 
-/* Add define only for platforms that support IPCC in dpu-hw */
-#define HW_DPU_IPCC 1
-
 /* max u64 to indicate invalid fence */
 #define HW_FENCE_INVALID_PARENT_FENCE (~0ULL)
 
@@ -75,6 +72,12 @@
  */
 #define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF))
 
+/**
+ * HW_FENCE_EVENT_MAX_DATA:
+ * Maximum data that can be added to the debug event
+ */
+#define HW_FENCE_EVENT_MAX_DATA 12
+
 enum hw_fence_lookup_ops {
 	HW_FENCE_LOOKUP_OP_CREATE = 0x1,
 	HW_FENCE_LOOKUP_OP_DESTROY,
@@ -82,45 +85,6 @@ enum hw_fence_lookup_ops {
 	HW_FENCE_LOOKUP_OP_FIND_FENCE
 };
 
-/**
- * enum hw_fence_loopback_id - Enum with the clients having a loopback signal (i.e AP to AP signal).
- * HW_FENCE_LOOPBACK_DPU_CTL_0: dpu client 0. Used in platforms with no dpu-ipc.
- * HW_FENCE_LOOPBACK_DPU_CTL_1: dpu client 1. Used in platforms with no dpu-ipc.
- * HW_FENCE_LOOPBACK_DPU_CTL_2: dpu client 2. Used in platforms with no dpu-ipc.
- * HW_FENCE_LOOPBACK_DPU_CTL_3: dpu client 3. Used in platforms with no dpu-ipc.
- * HW_FENCE_LOOPBACK_DPU_CTL_4: dpu client 4. Used in platforms with no dpu-ipc.
- * HW_FENCE_LOOPBACK_DPU_CTL_5: dpu client 5. Used in platforms with no dpu-ipc.
- * HW_FENCE_LOOPBACK_DPU_CTX_0: gfx client 0. Used in platforms with no gmu support.
- * HW_FENCE_LOOPBACK_VAL_0: debug validation client 0.
- * HW_FENCE_LOOPBACK_VAL_1: debug validation client 1.
- * HW_FENCE_LOOPBACK_VAL_2: debug validation client 2.
- * HW_FENCE_LOOPBACK_VAL_3: debug validation client 3.
- * HW_FENCE_LOOPBACK_VAL_4: debug validation client 4.
- * HW_FENCE_LOOPBACK_VAL_5: debug validation client 5.
- * HW_FENCE_LOOPBACK_VAL_6: debug validation client 6.
- */
-enum hw_fence_loopback_id {
-	HW_FENCE_LOOPBACK_DPU_CTL_0,
-	HW_FENCE_LOOPBACK_DPU_CTL_1,
-	HW_FENCE_LOOPBACK_DPU_CTL_2,
-	HW_FENCE_LOOPBACK_DPU_CTL_3,
-	HW_FENCE_LOOPBACK_DPU_CTL_4,
-	HW_FENCE_LOOPBACK_DPU_CTL_5,
-	HW_FENCE_LOOPBACK_GFX_CTX_0,
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-	HW_FENCE_LOOPBACK_VAL_0 = HW_FENCE_CLIENT_ID_VAL0,
-	HW_FENCE_LOOPBACK_VAL_1,
-	HW_FENCE_LOOPBACK_VAL_2,
-	HW_FENCE_LOOPBACK_VAL_3,
-	HW_FENCE_LOOPBACK_VAL_4,
-	HW_FENCE_LOOPBACK_VAL_5,
-	HW_FENCE_LOOPBACK_VAL_6,
-#endif /* CONFIG_DEBUG_FS */
-	HW_FENCE_LOOPBACK_MAX,
-};
-
-#define HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS (HW_FENCE_LOOPBACK_DPU_CTL_5 + 1)
-
 /**
  * enum hw_fence_client_data_id - Enum with the clients having client_data, an optional
  *                                parameter passed from the waiting client and returned
@@ -150,12 +114,19 @@ enum hw_fence_client_data_id {
  * @q_size_bytes: size of the queue
  * @va_header: pointer to the hfi header virtual address
  * @pa_queue: physical address of the queue
+ * @rd_wr_idx_start: start read and write indexes for client queue (zero by default)
+ * @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default)
+ * @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and
+ *               hfi_header->tx_wm is updated instead
  */
 struct msm_hw_fence_queue {
 	void *va_queue;
 	u32 q_size_bytes;
 	void *va_header;
 	phys_addr_t pa_queue;
+	u32 rd_wr_idx_start;
+	u32 rd_wr_idx_factor;
+	bool skip_wr_idx;
 };
 
 /**
@@ -172,13 +143,12 @@ enum payload_type {
  *                 number of sub-clients (e.g. ife clients)
  * @mem_descriptor: hfi header memory descriptor
  * @queues: queues descriptor
+ * @queues_num: number of client queues
  * @ipc_signal_id: id of the signal to be triggered for this client
  * @ipc_client_vid: virtual id of the ipc client for this hw fence driver client
  * @ipc_client_pid: physical id of the ipc client for this hw fence driver client
  * @update_rxq: bool to indicate if client uses rx-queue
  * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
- * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
- *                   driver and hfi_header->tx_wm is updated instead
  * @wait_queue: wait queue for the validation clients
  * @val_signal: doorbell flag to signal the validation clients in the wait queue
  */
@@ -187,12 +157,12 @@ struct msm_hw_fence_client {
 	enum hw_fence_client_id client_id_ext;
 	struct msm_hw_fence_mem_addr mem_descriptor;
 	struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES];
+	int queues_num;
 	int ipc_signal_id;
 	int ipc_client_vid;
 	int ipc_client_pid;
 	bool update_rxq;
 	bool send_ipc;
-	bool skip_txq_wr_idx;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	wait_queue_head_t wait_queue;
 	atomic_t val_signal;
@@ -239,24 +209,52 @@ struct msm_hw_fence_dbg_data {
 };
 
 /**
- * struct hw_fence_client_queue_size_desc - Structure holding client queue properties for a client.
+ * struct hw_fence_client_type_desc - Structure holding client type properties, including static
+ *                                    properties and client queue properties read from device-tree.
  *
- * @queues_num: number of client queues
- * @queue_entries: number of queue entries per client queue
- * @mem_size: size of memory allocated for client queues
- * @start_offset: start offset of client queue memory region, from beginning of carved-out memory
- *                allocation for hw fence driver
+ * @name: name of client type, used to parse properties from device-tree
+ * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
+ *           HW_FENCE_CLIENT_ID_CTL0 for DPU clients
+ * @max_clients_num: maximum number of clients of given client type
+ * @clients_num: number of clients of given client type
+ * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
+ *              two (for both Tx and Rx Queues)
+ * @queue_entries: number of entries per client queue of given client type
+ * @start_padding: size of padding between queue table header and first queue header in bytes
+ * @end_padding: size of padding between queue header(s) and first queue payload in bytes
+ * @mem_size: size of memory allocated for client queue(s) per client in bytes
+ * @txq_idx_start: start read and write indexes for client tx queue (zero by default)
+ * @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default)
  * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
  *                   driver and hfi_header->tx_wm is updated instead
  */
-struct hw_fence_client_queue_size_desc {
+struct hw_fence_client_type_desc {
+	char *name;
+	enum hw_fence_client_id init_id;
+	u32 max_clients_num;
+	u32 clients_num;
 	u32 queues_num;
 	u32 queue_entries;
+	u32 start_padding;
+	u32 end_padding;
 	u32 mem_size;
-	u32 start_offset;
+	u32 txq_idx_start;
+	u32 txq_idx_factor;
 	bool skip_txq_wr_idx;
 };
 
+/**
+ * struct hw_fence_client_queue_desc - Structure holding client queue properties for a client.
+ *
+ * @type: pointer to client queue properties of client type
+ * @start_offset: start offset of client queue memory region, from beginning of carved-out memory
+ *                allocation for hw fence driver
+ */
+struct hw_fence_client_queue_desc {
+	struct hw_fence_client_type_desc *type;
+	u32 start_offset;
+};
+
 /**
  * struct hw_fence_driver_data - Structure holding internal hw-fence driver data
  *
@@ -268,10 +266,13 @@ struct hw_fence_client_queue_size_desc {
  * @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload
  * @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq
  * @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client
+ * @hw_fence_client_types: descriptors of properties for each hw fence client type
  * @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree)
  * @clients_num: number of supported hw fence clients (configured based on device-tree)
  * @hw_fences_tbl: pointer to the hw-fences table
  * @hw_fences_tbl_cnt: number of elements in the hw-fence table
+ * @events: start address of hw fence debug events
+ * @total_events: total number of hw fence debug events supported
  * @client_lock_tbl: pointer to the per-client locks table
  * @client_lock_tbl_cnt: number of elements in the locks table
  * @hw_fences_mem_desc: memory descriptor for the hw-fence table
@@ -299,8 +300,6 @@ struct hw_fence_client_queue_size_desc {
  * @qtime_reg_base: qtimer register base address
  * @qtime_io_mem: qtimer io mem map
  * @qtime_size: qtimer io mem map size
- * @ctl_start_ptr: pointer to the ctl_start registers of the display hw (platforms with no dpu-ipc)
- * @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc)
  * @client_id_mask: bitmask for tracking registered client_ids
  * @clients_register_lock: lock to synchronize clients registration and deregistration
  * @clients: table with the handles of the registered clients; size is equal to clients_num
@@ -320,7 +319,7 @@ struct hw_fence_driver_data {
 	u32 hw_fence_ctrl_queue_size;
 	u32 hw_fence_mem_ctrl_queues_size;
 	/* client queues */
-	struct hw_fence_client_queue_size_desc *hw_fence_client_queue_size;
+	struct hw_fence_client_queue_desc *hw_fence_client_queue_size;
 	struct hw_fence_client_type_desc *hw_fence_client_types;
 	u32 rxq_clients_num;
 	u32 clients_num;
@@ -329,6 +328,10 @@ struct hw_fence_driver_data {
 	struct msm_hw_fence *hw_fences_tbl;
 	u32 hw_fences_tbl_cnt;
 
+	/* events */
+	struct msm_hw_fence_event *events;
+	u32 total_events;
+
 	/* Table with a Per-Client Lock */
 	u64 *client_lock_tbl;
 	u32 client_lock_tbl_cnt;
@@ -374,10 +377,6 @@ struct hw_fence_driver_data {
 	void __iomem *qtime_io_mem;
 	uint32_t qtime_size;
 
-	/* base address for dpu ctl start regs */
-	void *ctl_start_ptr[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS];
-	uint32_t ctl_start_size[HW_FENCE_MAX_DPU_LOOPBACK_CLIENTS];
-
 	/* synchronize client_ids registration and deregistration */
 	struct mutex clients_register_lock;
 
@@ -385,10 +384,8 @@ struct hw_fence_driver_data {
 	struct msm_hw_fence_client **clients;
 
 	bool vm_ready;
-#ifdef HW_DPU_IPCC
 	/* state variables */
 	bool ipcc_dpu_initialized;
-#endif /* HW_DPU_IPCC */
 };
 
 /**
@@ -423,6 +420,20 @@ struct msm_hw_fence_queue_payload {
 	u32 reserve;
 };
 
+/**
+ * struct msm_hw_fence_event - hardware fence ctl debug event
+ * @time: qtime when the event is logged
+ * @cpu: cpu id where the event is logged
+ * @data_cnt: count of valid data available in the data field
+ * @data: debug data logged by the event
+ */
+struct msm_hw_fence_event {
+	u64 time;
+	u32 cpu;
+	u32 data_cnt;
+	u32 data[HW_FENCE_EVENT_MAX_DATA];
+};
+
 /**
  * struct msm_hw_fence - structure holding each hw fence data.
  * @valid: field updated when a hw-fence is reserved. True if hw-fence is in use

+ 6 - 43
hw_fence/include/hw_fence_drv_utils.h

@@ -30,39 +30,14 @@
  * HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region.
  * HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table.
  * HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues.
+ * HW_FENCE_MEM_RESERVE_EVENTS_BUFF: Reserve memory for the debug events
  */
 enum hw_fence_mem_reserve {
 	HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
 	HW_FENCE_MEM_RESERVE_LOCKS_REGION,
 	HW_FENCE_MEM_RESERVE_TABLE,
-	HW_FENCE_MEM_RESERVE_CLIENT_QUEUE
-};
-
-/**
- * struct hw_fence_client_type_desc - Structure holding client type properties, including static
- *                                    properties and client queue properties read from device-tree.
- *
- * @name: name of client type, used to parse properties from device-tree
- * @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
- *           HW_FENCE_CLIENT_ID_CTL0 for DPU clients
- * @max_clients_num: maximum number of clients of given client type
- * @clients_num: number of clients of given client type
- * @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
- *              two (for both Tx and Rx Queues)
- * @queue_entries: number of entries per client queue of given client type
- * @mem_size: size of memory allocated for client queue(s) per client
- * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
- *                   driver and hfi_header->tx_wm is updated instead
- */
-struct hw_fence_client_type_desc {
-	char *name;
-	enum hw_fence_client_id init_id;
-	u32 max_clients_num;
-	u32 clients_num;
-	u32 queues_num;
-	u32 queue_entries;
-	u32 mem_size;
-	bool skip_txq_wr_idx;
+	HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
+	HW_FENCE_MEM_RESERVE_EVENTS_BUFF
 };
 
 /**
@@ -133,15 +108,6 @@ int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data);
  */
 int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data);
 
-/**
- * hw_fence_utils_map_ctl_start() -  Maps ctl_start registers from dpu hw
- * @drv_data: hw fence driver data
- *
- * Returns zero if success, otherwise returns negative error code. This API is only used
- * for simulation purposes in platforms where dpu does not support ipc signal.
- */
-int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data);
-
 /**
  * hw_fence_utils_cleanup_fence() -  Cleanup the hw-fence from a specified client
  * @drv_data: hw fence driver data
@@ -174,16 +140,13 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver
 	enum hw_fence_client_id client_id);
 
 /**
- * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index
- *                                       is not updated in hw fence driver. Instead,
- *                                       hfi_header->tx_wm tracks where payload is written within
- *                                       the queue.
+ * hw_fence_utils_get_queues_num() - Returns number of client queues for the client_id.
  *
  * @drv_data: driver data
  * @client_id: hw fence driver client id
  *
- * Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise
+ * Returns: number of client queues
  */
-bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id);
+int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id);
 
 #endif /* __HW_FENCE_DRV_UTILS_H */

+ 137 - 6
hw_fence/src/hw_fence_drv_debug.c

@@ -14,6 +14,11 @@
 
 #define HW_FENCE_DEBUG_MAX_LOOPS 200
 
+/* each event data element dumps as one 32-bit hex value (8 chars) plus a "|" separator */
+#define HW_FENCE_MAX_DATA_PER_EVENT_DUMP (HW_FENCE_EVENT_MAX_DATA * 9)
+
+#define HFENCE_EVT_MSG "[%d][cpu:%d][%lu] data[%d]:%s\n"
+
 u32 msm_hw_fence_debug_level = HW_FENCE_PRINTK;
 
 /**
@@ -112,7 +117,6 @@ static ssize_t hw_fence_dbg_ipcc_write(struct file *file, const char __user *use
 		drv_data->ipcc_client_vid);
 }
 
-#ifdef HW_DPU_IPCC
 /**
  * hw_fence_dbg_ipcc_dpu_write() - debugfs write to trigger an ipcc irq to dpu core.
  * @file: file handler.
@@ -137,7 +141,6 @@ static const struct file_operations hw_fence_dbg_ipcc_dpu_fops = {
 	.open = simple_open,
 	.write = hw_fence_dbg_ipcc_dpu_write,
 };
-#endif /* HW_DPU_IPCC */
 
 static const struct file_operations hw_fence_dbg_ipcc_fops = {
 	.open = simple_open,
@@ -542,6 +545,129 @@ static int dump_full_table(struct hw_fence_driver_data *drv_data, char *buf, u32
 	return len;
 }
 
+static inline int _dump_event(struct msm_hw_fence_event *event, char *buf, int len, int max_size,
+	u32 index)
+{
+	char data[HW_FENCE_MAX_DATA_PER_EVENT_DUMP];
+	u32 data_cnt;
+	int i, tmp_len = 0, ret = 0;
+
+	if (!event->time)
+		return 0;
+
+	memset(&data, 0, sizeof(data));
+	if (event->data_cnt > HW_FENCE_EVENT_MAX_DATA) {
+		HWFNC_ERR("event[%d] has invalid data_cnt:%lu greater than max_data_cnt:%lu\n",
+			index, event->data_cnt, HW_FENCE_EVENT_MAX_DATA);
+		data_cnt = HW_FENCE_EVENT_MAX_DATA;
+	} else {
+		data_cnt = event->data_cnt;
+	}
+
+	for (i = 0; i < data_cnt; i++)
+		tmp_len += scnprintf(data + tmp_len, HW_FENCE_MAX_DATA_PER_EVENT_DUMP - tmp_len,
+			"%lx|", event->data[i]);
+
+	ret = scnprintf(buf + len, max_size - len, HFENCE_EVT_MSG, index, event->cpu, event->time,
+		event->data_cnt, data);
+
+	HWFNC_DBG_INFO(HFENCE_EVT_MSG, index, event->cpu, event->time, event->data_cnt, data);
+
+	return ret;
+}
+
+/**
+ * hw_fence_dbg_dump_events_rd() - debugfs read to dump the fctl events.
+ * @file: file handler.
+ * @user_buf: user buffer content for debugfs.
+ * @user_buf_size: size of the user buffer.
+ * @ppos: position offset of the user buffer.
+ */
+static ssize_t hw_fence_dbg_dump_events_rd(struct file *file, char __user *user_buf,
+	size_t user_buf_size, loff_t *ppos)
+{
+	struct hw_fence_driver_data *drv_data;
+	u32 entry_size = sizeof(struct msm_hw_fence_event), max_size = SZ_4K;
+	char *buf = NULL;
+	int len = 0;
+	static u64 start_time;
+	static int index, start_index;
+	static bool wraparound;
+
+	if (!file || !file->private_data) {
+		HWFNC_ERR("unexpected data %d\n", file);
+		return -EINVAL;
+	}
+	drv_data = file->private_data;
+
+	if (!drv_data->events) {
+		HWFNC_ERR("events not supported\n");
+		return -EINVAL;
+	}
+
+	if (wraparound && index >= start_index) {
+		HWFNC_DBG_H("no more data index:%d total_events:%d\n", index,
+			drv_data->total_events);
+		start_time = 0;
+		index = 0;
+		wraparound = false;
+		return 0;
+	}
+
+	if (user_buf_size < entry_size) {
+		HWFNC_ERR("Not enough buff size:%d to dump entries:%d\n", user_buf_size,
+			entry_size);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(max_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* find index of earliest event */
+	if (!start_time) {
+		mb(); /* make sure data is ready before read */
+		for (index = 0; index < drv_data->total_events; index++) {
+			u64 time = drv_data->events[index].time;
+
+			if (time && (!start_time || time < start_time)) {
+				start_time = time;
+				start_index = index;
+			}
+		}
+		index = start_index;
+		HWFNC_DBG_H("events:0x%pK start_index:%d start_time:%llu total_events:%d\n",
+			drv_data->events, start_index, start_time, drv_data->total_events);
+	}
+
+	HWFNC_DBG_H("++ dump_events index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data));
+	while ((!wraparound || index < start_index) && len < (max_size - entry_size)) {
+		len += _dump_event(&drv_data->events[index], buf, len, max_size, index);
+		index++;
+		if (index >= drv_data->total_events) {
+			index = 0;
+			wraparound = true;
+		}
+	}
+	HWFNC_DBG_H("-- dump_events: index:%d qtime:%llu\n", index, hw_fence_get_qtime(drv_data));
+
+	if (len <= 0 || len > user_buf_size) {
+		HWFNC_ERR("len:%d invalid buff size:%d\n", len, user_buf_size);
+		len = 0;
+		goto exit;
+	}
+
+	if (copy_to_user(user_buf, buf, len)) {
+		HWFNC_ERR("failed to copy to user!\n");
+		len = -EFAULT;
+		goto exit;
+	}
+	*ppos += len;
+exit:
+	kfree(buf);
+	return len;
+}
+
 /**
  * hw_fence_dbg_dump_queues_wr() - debugfs wr to dump the hw-fences queues.
  * @file: file handler.
@@ -897,9 +1023,9 @@ int process_validation_client_loopback(struct hw_fence_driver_data *drv_data,
 {
 	struct msm_hw_fence_client *hw_fence_client;
 
-	if (client_id < HW_FENCE_LOOPBACK_VAL_0 || client_id > HW_FENCE_LOOPBACK_VAL_6) {
+	if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) {
 		HWFNC_ERR("invalid client_id: %d min: %d max: %d\n", client_id,
-				HW_FENCE_LOOPBACK_VAL_0, HW_FENCE_LOOPBACK_VAL_6);
+				HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
 		return -EINVAL;
 	}
 
@@ -957,6 +1083,11 @@ static const struct file_operations hw_fence_dump_queues_fops = {
 	.write = hw_fence_dbg_dump_queues_wr,
 };
 
+static const struct file_operations hw_fence_dump_events_fops = {
+	.open = simple_open,
+	.read = hw_fence_dbg_dump_events_rd,
+};
+
 static const struct file_operations hw_fence_create_join_fence_fops = {
 	.open = simple_open,
 	.write = hw_fence_dbg_create_join_fence,
@@ -982,10 +1113,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data)
 
 	debugfs_create_file("ipc_trigger", 0600, debugfs_root, drv_data,
 		&hw_fence_dbg_ipcc_fops);
-#ifdef HW_DPU_IPCC
 	debugfs_create_file("dpu_trigger", 0600, debugfs_root, drv_data,
 		&hw_fence_dbg_ipcc_dpu_fops);
-#endif /* HW_DPU_IPCC */
 	debugfs_create_file("hw_fence_reset_client", 0600, debugfs_root, drv_data,
 		&hw_fence_reset_client_fops);
 	debugfs_create_file("hw_fence_register_clients", 0600, debugfs_root, drv_data,
@@ -1008,6 +1137,8 @@ int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data)
 	debugfs_create_file("hw_sync", 0600, debugfs_root, NULL, &hw_sync_debugfs_fops);
 	debugfs_create_u64("hw_fence_lock_wake_cnt", 0600, debugfs_root,
 		&drv_data->debugfs_data.lock_wake_cnt);
+	debugfs_create_file("hw_fence_dump_events", 0600, debugfs_root, drv_data,
+		&hw_fence_dump_events_fops);
 
 	return 0;
 }

+ 2 - 59
hw_fence/src/hw_fence_drv_ipc.c

@@ -32,37 +32,6 @@ struct hw_fence_client_ipc_map {
 	bool send_ipc;
 };
 
-/**
- * struct hw_fence_clients_ipc_map_no_dpu - Table makes the 'client to signal' mapping, which
- *		is used by the hw fence driver to trigger ipc signal when the hw fence is already
- *		signaled.
- *		This no_dpu version is for targets that do not support dpu client id
- *
- * Notes:
- * The index of this struct must match the enum hw_fence_client_id.
- * To change to a loopback signal instead of GMU, change ctx0 row to use:
- *   {HW_FENCE_IPC_CLIENT_ID_APPS, 20}.
- */
-struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_MAX] = {
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/
-	{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 15, false, true},/*ctl1*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 16, false, true},/*ctl2*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 17, false, true},/*ctl3*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 18, false, true},/*ctl4*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 19, false, true},/*ctl5*/
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, false},/*val0*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, false},/*val1*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, false},/*val2*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, false},/*val3*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, false},/*val4*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, false},/*val5*/
-	{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, false},/*val6*/
-#endif /* CONFIG_DEBUG_FS */
-};
-
 /**
  * struct hw_fence_clients_ipc_map - Table makes the 'client to signal' mapping, which is
  *		used by the hw fence driver to trigger ipc signal when hw fence is already
@@ -337,20 +306,6 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32
 	int ret = 0;
 
 	switch (hwrev) {
-	case HW_FENCE_IPCC_HW_REV_100:
-		drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
-		drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
-		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA;
-		drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu;
-		HWFNC_DBG_INIT("ipcc protocol_id: Lahaina\n");
-		break;
-	case HW_FENCE_IPCC_HW_REV_110:
-		drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
-		drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
-		drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO;
-		drv_data->ipc_clients_table = hw_fence_clients_ipc_map_no_dpu;
-		HWFNC_DBG_INIT("ipcc protocol_id: Waipio\n");
-		break;
 	case HW_FENCE_IPCC_HW_REV_170:
 		drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
 		drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
@@ -381,20 +336,10 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
 
 	HWFNC_DBG_H("enable ipc +\n");
 
-	/**
-	 * Attempt to read the ipc version from dt, if not available, then attempt
-	 * to read from the registers.
-	 */
 	ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val);
 	if (ret || !val) {
-		/* if no device tree prop, attempt to get the version from the registers*/
-		HWFNC_DBG_H("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val);
-
-		/* Read IPC Version from Client=0x8 (apps) for protocol=2 (compute_l1) */
-		val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_VERSION(drv_data->ipcc_io_mem,
-			HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA,
-			HW_FENCE_IPC_CLIENT_ID_APPS_VID));
-		HWFNC_DBG_INIT("ipcc version:0x%x\n", val);
+		HWFNC_ERR("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val);
+		return -EINVAL;
 	}
 
 	if (_hw_fence_ipcc_hwrev_init(drv_data, val)) {
@@ -421,7 +366,6 @@ int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
 	return 0;
 }
 
-#ifdef HW_DPU_IPCC
 int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data)
 {
 	struct hw_fence_client_ipc_map *hw_fence_client;
@@ -482,4 +426,3 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data)
 
 	return 0;
 }
-#endif /* HW_DPU_IPCC */

+ 126 - 33
hw_fence/src/hw_fence_drv_priv.c

@@ -15,6 +15,8 @@
 /* Global atomic lock */
 #define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val)
 
+#define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1)
+
 inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
 {
 #ifdef HWFENCE_USE_SLEEP_TIMER
@@ -32,11 +34,14 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 {
 	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
 	struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
+	struct hw_fence_client_type_desc *desc;
 	void *ptr, *qptr;
 	phys_addr_t phys, qphys;
-	u32 size, start_queue_offset;
+	u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1;
 	int headers_size, queue_size, payload_size;
+	int start_padding = 0, end_padding = 0;
 	int i, ret = 0;
+	bool skip_txq_wr_idx = false;
 
 	HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
 	switch (mem_reserve_id) {
@@ -46,15 +51,23 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 		payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
 		break;
 	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
-		if (client_id >= drv_data->clients_num) {
-			HWFNC_ERR("Invalid client_id: %d\n", client_id);
+		if (client_id >= drv_data->clients_num ||
+				!drv_data->hw_fence_client_queue_size[client_id].type) {
+			HWFNC_ERR("Invalid client_id:%d for clients_num:%lu\n", client_id,
+				drv_data->clients_num);
 			return -EINVAL;
 		}
 
-		headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num);
-		queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD *
-			drv_data->hw_fence_client_queue_size[client_id].queue_entries;
+		desc = drv_data->hw_fence_client_queue_size[client_id].type;
+		start_padding = desc->start_padding;
+		end_padding = desc->end_padding;
+		headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) + start_padding +
+			end_padding;
+		queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
 		payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
+		txq_idx_start = desc->txq_idx_start;
+		txq_idx_factor = desc->txq_idx_factor ? desc->txq_idx_factor : 1;
+		skip_txq_wr_idx = desc->skip_txq_wr_idx;
 		break;
 	default:
 		HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
@@ -75,16 +88,15 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 	mem_descriptor->size = size; /* bytes */
 	mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */
 
-	HWFNC_DBG_INIT("Initialize headers\n");
+	HWFNC_DBG_INIT("Initialize headers: headers_size:%d start_padding:%d end_padding:%d\n",
+		headers_size, start_padding, end_padding);
 	/* Initialize headers info within hfi memory */
 	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
 	hfi_table_header->version = 0;
 	hfi_table_header->size = size; /* bytes */
 	/* Offset, from the Base Address, where the first queue header starts */
-	hfi_table_header->qhdr0_offset =
-		sizeof(struct msm_hw_fence_hfi_queue_table_header);
-	hfi_table_header->qhdr_size =
-		sizeof(struct msm_hw_fence_hfi_queue_header);
+	hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE + start_padding;
+	hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE;
 	hfi_table_header->num_q = queues_num; /* number of queues */
 	hfi_table_header->num_active_q = queues_num;
 
@@ -96,7 +108,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 	 */
 	HWFNC_DBG_INIT("Initialize queues\n");
 	hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
-					   ((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE);
+					   ((char *)ptr + hfi_table_header->qhdr0_offset);
 	for (i = 0; i < queues_num; i++) {
 		HWFNC_DBG_INIT("init queue[%d]\n", i);
 
@@ -109,7 +121,8 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 		hfi_queue_header->start_addr = qphys;
 
 		/* Set the queue type (i.e. RX or TX queue) */
-		hfi_queue_header->type = (i == 0) ? HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE;
+		hfi_queue_header->type = IS_HW_FENCE_TX_QUEUE(i) ? HW_FENCE_TX_QUEUE :
+			HW_FENCE_RX_QUEUE;
 
 		/* Set the size of this header */
 		hfi_queue_header->queue_size = queue_size;
@@ -117,6 +130,20 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 		/* Set the payload size */
 		hfi_queue_header->pkt_size = payload_size;
 
+		/* Set write index for clients' tx queues that index from nonzero value */
+		if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !hfi_queue_header->write_index) {
+			if (skip_txq_wr_idx)
+				hfi_queue_header->tx_wm = txq_idx_start;
+			hfi_queue_header->read_index = txq_idx_start;
+			hfi_queue_header->write_index = txq_idx_start;
+			HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%lu\n", client_id,
+				skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx",
+				txq_idx_start);
+		}
+
+		/* Update memory for hfi_queue_header */
+		wmb();
+
 		/* Store Memory info in the Client data */
 		queues[i].va_queue = qptr;
 		queues[i].pa_queue = qphys;
@@ -127,6 +154,18 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 			client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header,
 			queues[i].q_size_bytes, payload_size);
 
+		/* Store additional tx queue rd_wr_idx properties */
+		if (IS_HW_FENCE_TX_QUEUE(i)) {
+			queues[i].rd_wr_idx_start = txq_idx_start;
+			queues[i].rd_wr_idx_factor = txq_idx_factor;
+			queues[i].skip_wr_idx = skip_txq_wr_idx;
+		} else {
+			queues[i].rd_wr_idx_factor = 1;
+		}
+		HWFNC_DBG_INIT("rd_wr_idx_start:%lu rd_wr_idx_factor:%lu skip_wr_idx:%s\n",
+			queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor,
+			queues[i].skip_wr_idx ? "true" : "false");
+
 		/* Next header */
 		hfi_queue_header++;
 	}
@@ -183,6 +222,14 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
 	read_idx = readl_relaxed(&hfi_header->read_index);
 	write_idx = readl_relaxed(&hfi_header->write_index);
 
+	/* translate read and write indexes from custom indexing to dwords with no offset */
+	if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
+		read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
+		write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
+		HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n",
+			read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
+	}
+
 	HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n",
 		hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
 		read_idx, write_idx, queue);
@@ -209,6 +256,13 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
 	if (to_read_idx >= q_size_u32)
 		to_read_idx = 0;
 
+	/* translate to_read_idx to custom indexing with offset */
+	if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
+		to_read_idx = (to_read_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start;
+		HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n",
+			to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
+	}
+
 	/* Read the Client Queue */
 	payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id);
 	payload->seqno = readq_relaxed(&read_ptr_payload->seqno);
@@ -251,10 +305,9 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 	u32 *wr_ptr;
 	int ret = 0;
 
-	if (queue_type >=
-		drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) {
-		HWFNC_ERR("Invalid queue type:%s client_id:%d\n", queue_type,
-			hw_fence_client->client_id);
+	if (queue_type >= hw_fence_client->queues_num) {
+		HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%lu\n", queue_type,
+			hw_fence_client->client_id, hw_fence_client->queues_num);
 		return -EINVAL;
 	}
 
@@ -270,8 +323,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 		return -EINVAL;
 	}
 
-	/* if skipping update txq wr_index, then use hfi_header->tx_wm instead */
-	if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx)
+	/* if skipping update wr_index, then use hfi_header->tx_wm instead */
+	if (queue->skip_wr_idx)
 		wr_ptr = &hfi_header->tx_wm;
 	else
 		wr_ptr = &hfi_header->write_index;
@@ -305,8 +358,15 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 
 	HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n",
 		hw_fence_client->client_id, &hfi_header->read_index, wr_ptr,
-		read_idx, write_idx, queue, queue_type,
-		hw_fence_client->skip_txq_wr_idx ? "true" : "false");
+		read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false");
+
+	/* translate read and write indexes from custom indexing to dwords with no offset */
+	if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
+		read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
+		write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
+		HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n",
+			read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
+	}
 
 	/* Check queue to make sure message will fit */
 	q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
@@ -341,6 +401,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 	if (to_write_idx >= q_size_u32)
 		to_write_idx = 0;
 
+	/* translate to_write_idx to custom indexing with offset */
+	if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
+		to_write_idx = (to_write_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start;
+		HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n",
+			to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
+	}
+
 	/* Update Client Queue */
 	writeq_relaxed(payload_size, &write_ptr_payload->size);
 	writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type);
@@ -435,6 +502,27 @@ static int init_hw_fences_table(struct hw_fence_driver_data *drv_data)
 	return 0;
 }
 
+static int init_hw_fences_events(struct hw_fence_driver_data *drv_data)
+{
+	phys_addr_t phys;
+	void *ptr;
+	u32 size;
+	int ret;
+
+	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_EVENTS_BUFF, &phys, &ptr,
+		&size, 0);
+	if (ret) {
+		HWFNC_DBG_INFO("Failed to reserve events buffer %d\n", ret);
+		return -ENOMEM;
+	}
+	drv_data->events = (struct msm_hw_fence_event *)ptr;
+	drv_data->total_events = size / sizeof(struct msm_hw_fence_event);
+	HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%u total_size:%u\n", drv_data->events,
+		drv_data->total_events, sizeof(struct msm_hw_fence_event), size);
+
+	return 0;
+}
+
 static int init_ctrl_queue(struct hw_fence_driver_data *drv_data)
 {
 	struct msm_hw_fence_mem_addr *mem_descriptor;
@@ -486,6 +574,11 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data)
 	if (ret)
 		goto exit;
 
+	/* Initialize event log */
+	ret = init_hw_fences_events(drv_data);
+	if (ret)
+		HWFNC_DBG_INFO("Unable to init events\n");
+
 	/* Map ipcc registers */
 	ret = hw_fence_utils_map_ipcc(drv_data);
 	if (ret) {
@@ -500,15 +593,6 @@ int hw_fence_init(struct hw_fence_driver_data *drv_data)
 		goto exit;
 	}
 
-	/* Map ctl_start registers */
-	ret = hw_fence_utils_map_ctl_start(drv_data);
-	if (ret) {
-		/* This is not fatal error, since platfoms with dpu-ipc
-		 * won't use this option
-		 */
-		HWFNC_WARN("no ctl_start regs, won't trigger the frame\n");
-	}
-
 	/* Init debugfs */
 	ret = hw_fence_debug_debugfs_register(drv_data);
 	if (ret) {
@@ -539,10 +623,16 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
 {
 	int ret;
 
+	if (!drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type) {
+		HWFNC_ERR("invalid client_id:%d not reserved client queue; check dt props\n",
+			hw_fence_client->client_id);
+		return -EINVAL;
+	}
+
 	/* Init client queues */
 	ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
 		&hw_fence_client->mem_descriptor, hw_fence_client->queues,
-		drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num,
+		drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type->queues_num,
 		hw_fence_client->client_id);
 	if (ret) {
 		HWFNC_ERR("Failure to init the queue for client:%d\n",
@@ -592,7 +682,6 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
 	case HW_FENCE_CLIENT_ID_CTL3:
 	case HW_FENCE_CLIENT_ID_CTL4:
 	case HW_FENCE_CLIENT_ID_CTL5:
-#ifdef HW_DPU_IPCC
 		/* initialize ipcc signals for dpu clients */
 		HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n",
 			hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized);
@@ -602,7 +691,6 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
 			/* Init dpu client ipcc signal */
 			hw_fence_ipcc_enable_dpu_signaling(drv_data);
 		}
-#endif /* HW_DPU_IPCC */
 		break;
 	case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE +
 			MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
@@ -1451,8 +1539,12 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
 	/* For the client TxQ: set the read-index same as last write that was done by the client */
 	mb(); /* make sure data is ready before read */
 	wr_idx = readl_relaxed(&hfi_header->write_index);
+	if (queue->skip_wr_idx)
+		hfi_header->tx_wm = wr_idx;
 	writel_relaxed(wr_idx, &hfi_header->read_index);
 	wmb(); /* make sure data is updated after write the index*/
+	HWFNC_DBG_Q("update tx queue %s to match write_index:%lu\n",
+		queue->skip_wr_idx ? "read_index=tx_wm" : "read_index", wr_idx);
 
 	/* For the client RxQ: set the write-index same as last read done by the client */
 	if (hw_fence_client->update_rxq) {
@@ -1478,6 +1570,7 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
 
 		/* unlock */
 		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0);
+		HWFNC_DBG_Q("update rx queue write_index to match read_index:%lu\n", rd_idx);
 	}
 }
 

+ 219 - 173
hw_fence/src/hw_fence_drv_utils.c

@@ -10,6 +10,9 @@
 #include <linux/gunyah/gh_dbl.h>
 #include <linux/qcom_scm.h>
 #include <linux/version.h>
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+#include <linux/gh_cpusys_vm_mem_access.h>
+#endif
 #include <soc/qcom/secure_buffer.h>
 
 #include "hw_fence_drv_priv.h"
@@ -52,10 +55,16 @@
 
 /*
  * Each bit in this mask represents each of the loopback clients supported in
- * the enum hw_fence_loopback_id
+ * the enum hw_fence_client_id
  */
 #define HW_FENCE_LOOPBACK_CLIENTS_MASK 0x7fff
 
+/**
+ * HW_FENCE_MAX_EVENTS:
+ * Maximum number of HW Fence debug events
+ */
+#define HW_FENCE_MAX_EVENTS 1000
+
 /**
  * struct hw_fence_client_types - Table describing all supported client types, used to parse
  *                                device-tree properties related to client queue size.
@@ -77,23 +86,31 @@
  */
 struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
 	{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
-		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
 	{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
-		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
 	{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
-		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
-	{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0,
-		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
-	{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0,
-		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
-	{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
-	{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
-	{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
-	{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
-	{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
-	{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
-	{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
-	{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false},
+	{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES,
+		0, 0, 0, 0, 0, 0, false},
+	{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES,
+		0, 0, 0, 0, 0, 0, false},
+	{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
+	{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0,
+		true},
 };
 
 static void _lock(uint64_t *wait)
@@ -162,89 +179,20 @@ void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock,
 	}
 }
 
-static inline int _process_dpu_client_loopback(struct hw_fence_driver_data *drv_data,
-	int client_id)
-{
-	int ctl_id = client_id; /* dpu ctl path id is mapped to client id used for the loopback */
-	void *ctl_start_reg;
-	u32 val;
-
-	if (ctl_id > HW_FENCE_LOOPBACK_DPU_CTL_5) {
-		HWFNC_ERR("invalid ctl_id:%d\n", ctl_id);
-		return -EINVAL;
-	}
-
-	ctl_start_reg = drv_data->ctl_start_ptr[ctl_id];
-	if (!ctl_start_reg) {
-		HWFNC_ERR("ctl_start reg not valid for ctl_id:%d\n", ctl_id);
-		return -EINVAL;
-	}
-
-	HWFNC_DBG_H("Processing DPU loopback ctl_id:%d\n", ctl_id);
-
-	val = 0x1; /* ctl_start trigger */
-#ifdef CTL_START_SIM
-	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x\n", ctl_start_reg, val, ctl_id);
-	writel_relaxed(val, ctl_start_reg);
-#else
-	HWFNC_DBG_IRQ("ctl_id:%d Write: to RegOffset:0x%pK val:0x%x (COMMENTED)\n", ctl_id,
-		ctl_start_reg, val);
-#endif
-
-	return 0;
-}
-
-static inline int _process_gfx_client_loopback(struct hw_fence_driver_data *drv_data,
-	int client_id)
-{
-	int queue_type = HW_FENCE_RX_QUEUE - 1; /* rx queue index */
-	struct msm_hw_fence_queue_payload payload;
-	int read = 1;
-
-	HWFNC_DBG_IRQ("Processing GFX loopback client_id:%d\n", client_id);
-	while (read) {
-		/*
-		 * 'client_id' is the loopback-client-id, not the hw-fence client_id,
-		 * so use GFX hw-fence client id, to get the client data
-		 */
-		read = hw_fence_read_queue(drv_data->clients[HW_FENCE_CLIENT_ID_CTX0], &payload,
-			queue_type);
-		if (read < 0) {
-			HWFNC_ERR("unable to read gfx rxq\n");
-			break;
-		}
-		HWFNC_DBG_L("GFX loopback rxq read: hash:%llu ctx:%llu seq:%llu f:%llu e:%lu\n",
-			payload.hash, payload.ctxt_id, payload.seqno, payload.flags, payload.error);
-	}
-
-	return read;
-}
-
 static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int client_id)
 {
 	int ret;
 
-	HWFNC_DBG_H("Processing loopback client_id:%d\n", client_id);
+	HWFNC_DBG_H("Processing doorbell client_id:%d\n", client_id);
 	switch (client_id) {
-	case HW_FENCE_LOOPBACK_DPU_CTL_0:
-	case HW_FENCE_LOOPBACK_DPU_CTL_1:
-	case HW_FENCE_LOOPBACK_DPU_CTL_2:
-	case HW_FENCE_LOOPBACK_DPU_CTL_3:
-	case HW_FENCE_LOOPBACK_DPU_CTL_4:
-	case HW_FENCE_LOOPBACK_DPU_CTL_5:
-		ret = _process_dpu_client_loopback(drv_data, client_id);
-		break;
-	case HW_FENCE_LOOPBACK_GFX_CTX_0:
-		ret = _process_gfx_client_loopback(drv_data, client_id);
-		break;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
-	case HW_FENCE_LOOPBACK_VAL_0:
-	case HW_FENCE_LOOPBACK_VAL_1:
-	case HW_FENCE_LOOPBACK_VAL_2:
-	case HW_FENCE_LOOPBACK_VAL_3:
-	case HW_FENCE_LOOPBACK_VAL_4:
-	case HW_FENCE_LOOPBACK_VAL_5:
-	case HW_FENCE_LOOPBACK_VAL_6:
+	case HW_FENCE_CLIENT_ID_VAL0:
+	case HW_FENCE_CLIENT_ID_VAL1:
+	case HW_FENCE_CLIENT_ID_VAL2:
+	case HW_FENCE_CLIENT_ID_VAL3:
+	case HW_FENCE_CLIENT_ID_VAL4:
+	case HW_FENCE_CLIENT_ID_VAL5:
+	case HW_FENCE_CLIENT_ID_VAL6:
 		ret = process_validation_client_loopback(drv_data, client_id);
 		break;
 #endif /* CONFIG_DEBUG_FS */
@@ -258,10 +206,10 @@ static int _process_doorbell_client(struct hw_fence_driver_data *drv_data, int c
 
 void hw_fence_utils_process_doorbell_mask(struct hw_fence_driver_data *drv_data, u64 db_flags)
 {
-	int client_id = HW_FENCE_LOOPBACK_DPU_CTL_0;
+	int client_id = HW_FENCE_CLIENT_ID_CTL0;
 	u64 mask;
 
-	for (; client_id < HW_FENCE_LOOPBACK_MAX; client_id++) {
+	for (; client_id <= HW_FENCE_CLIENT_ID_VAL6; client_id++) {
 		mask = 1 << client_id;
 		if (mask & db_flags) {
 			HWFNC_DBG_H("client_id:%d signaled! flags:0x%llx\n", client_id, db_flags);
@@ -375,8 +323,13 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
 	sgl->sgl_entries[0].ipa_base = drv_data->res.start;
 	sgl->sgl_entries[0].size = resource_size(&drv_data->res);
 
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
+			acl, sgl, NULL, &drv_data->memparcel);
+#else
 	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, drv_data->label,
 			acl, sgl, NULL, &drv_data->memparcel);
+#endif
 	if (ret) {
 		HWFNC_ERR("%s: gh_rm_mem_share failed addr=%x size=%u err=%d\n",
 			__func__, drv_data->res.start, drv_data->size, ret);
@@ -392,12 +345,23 @@ static int hw_fence_gunyah_share_mem(struct hw_fence_driver_data *drv_data,
 	return ret;
 }
 
+static int _is_mem_shared(struct resource *res)
+{
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	return gh_cpusys_vm_get_share_mem_info(res);
+#else
+	return -EINVAL;
+#endif
+}
+
 static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
 {
 	struct gh_rm_notif_vm_status_payload *vm_status_payload;
 	struct hw_fence_driver_data *drv_data;
+	struct resource res;
 	gh_vmid_t peer_vmid;
 	gh_vmid_t self_vmid;
+	int ret;
 
 	drv_data = container_of(nb, struct hw_fence_driver_data, rm_nb);
 
@@ -411,22 +375,44 @@ static int hw_fence_rm_cb(struct notifier_block *nb, unsigned long cmd, void *da
 	    vm_status_payload->vm_status != GH_RM_VM_STATUS_RESET)
 		goto end;
 
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+	if (ghd_rm_get_vmid(drv_data->peer_name, &peer_vmid))
+		goto end;
+
+	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
+		goto end;
+#else
 	if (gh_rm_get_vmid(drv_data->peer_name, &peer_vmid))
 		goto end;
 
 	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
 		goto end;
+#endif
 
 	if (peer_vmid != vm_status_payload->vmid)
 		goto end;
 
 	switch (vm_status_payload->vm_status) {
 	case GH_RM_VM_STATUS_READY:
-		HWFNC_DBG_INIT("init mem\n");
-		if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
-			HWFNC_ERR("failed to share memory\n");
-		else
-			drv_data->vm_ready = true;
+		ret = _is_mem_shared(&res);
+		if (ret) {
+			HWFNC_DBG_INIT("mem not shared ret:%d, attempt share\n", ret);
+			if (hw_fence_gunyah_share_mem(drv_data, self_vmid, peer_vmid))
+				HWFNC_ERR("failed to share memory\n");
+			else
+				drv_data->vm_ready = true;
+		} else {
+			if (drv_data->res.start == res.start &&
+					resource_size(&drv_data->res) == resource_size(&res)) {
+				drv_data->vm_ready = true;
+				HWFNC_DBG_INIT("mem_ready: add:0x%x size:%d ret:%d\n", res.start,
+					resource_size(&res), ret);
+			} else {
+				HWFNC_ERR("mem-shared mismatch:[0x%x,%d] expected:[0x%x,%d]\n",
+					res.start, resource_size(&res), drv_data->res.start,
+					resource_size(&drv_data->res));
+			}
+		}
 		break;
 	case GH_RM_VM_STATUS_RESET:
 		HWFNC_DBG_INIT("reset\n");
@@ -520,6 +506,8 @@ char *_get_mem_reserve_type(enum hw_fence_mem_reserve type)
 		return "HW_FENCE_MEM_RESERVE_TABLE";
 	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
 		return "HW_FENCE_MEM_RESERVE_CLIENT_QUEUE";
+	case HW_FENCE_MEM_RESERVE_EVENTS_BUFF:
+		return "HW_FENCE_MEM_RESERVE_EVENTS_BUFF";
 	}
 
 	return "Unknown";
@@ -531,6 +519,8 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
 {
 	int ret = 0;
 	u32 start_offset = 0;
+	u32 remaining_size_bytes;
+	u32 total_events;
 
 	switch (type) {
 	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
@@ -549,23 +539,32 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
 		*size = drv_data->hw_fence_mem_fences_table_size;
 		break;
 	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
-		if (client_id >= drv_data->clients_num) {
-			HWFNC_ERR("unexpected client_id:%d\n", client_id);
+		if (client_id >= drv_data->clients_num ||
+				!drv_data->hw_fence_client_queue_size[client_id].type) {
+			HWFNC_ERR("unexpected client_id:%d for clients_num:%lu\n", client_id,
+				drv_data->clients_num);
 			ret = -EINVAL;
 			goto exit;
 		}
 
 		start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset;
-		*size = drv_data->hw_fence_client_queue_size[client_id].mem_size;
-
-		/*
-		 * If this error occurs when client should be valid, check that support for this
-		 * client has been configured in device-tree properties.
-		 */
-		if (!*size) {
-			HWFNC_ERR("invalid client_id:%d not reserved client queue\n", client_id);
-			ret = -EINVAL;
+		*size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size;
+		break;
+	case HW_FENCE_MEM_RESERVE_EVENTS_BUFF:
+		start_offset = drv_data->used_mem_size;
+		remaining_size_bytes = drv_data->size - start_offset;
+		if (start_offset >= drv_data->size ||
+				remaining_size_bytes < sizeof(struct msm_hw_fence_event)) {
+			HWFNC_DBG_INFO("no space for events total_sz:%lu offset:%lu evt_sz:%lu\n",
+				drv_data->size, start_offset, sizeof(struct msm_hw_fence_event));
+			ret = -ENOMEM;
+			goto exit;
 		}
+
+		total_events = remaining_size_bytes / sizeof(struct msm_hw_fence_event);
+		if (total_events > HW_FENCE_MAX_EVENTS)
+			total_events = HW_FENCE_MAX_EVENTS;
+		*size = total_events * sizeof(struct msm_hw_fence_event);
 		break;
 	default:
 		HWFNC_ERR("Invalid mem reserve type:%d\n", type);
@@ -592,6 +591,92 @@ exit:
 	return ret;
 }
 
+static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data,
+	struct hw_fence_client_type_desc *desc)
+{
+	u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32);
+	char name[40];
+	u32 tmp[4];
+	bool idx_by_payload = false;
+	int count, ret;
+
+	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name);
+
+	/* check if property is present */
+	ret = of_property_read_bool(drv_data->dev->of_node, name);
+	if (!ret)
+		return 0;
+
+	count = of_property_count_u32_elems(drv_data->dev->of_node, name);
+	if (count <= 0 || count > 4) {
+		HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count);
+	if (ret) {
+		HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name,
+			ret, count);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	desc->start_padding = tmp[0];
+	if (count >= 2)
+		desc->end_padding = tmp[1];
+	if (count >= 3)
+		desc->txq_idx_start = tmp[2];
+	if (count >= 4) {
+		if (tmp[3] > 1) {
+			HWFNC_ERR("%s invalid txq_idx_by_payload prop:%lu\n", desc->name, tmp[3]);
+			ret = -EINVAL;
+			goto exit;
+		}
+		idx_by_payload = tmp[3];
+		desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1;
+	}
+
+	if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) ||
+			(desc->start_padding + desc->end_padding) % sizeof(u64)) {
+		HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n",
+			desc->name, desc->start_padding, desc->end_padding);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) {
+		HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n",
+			desc->name, desc->queues_num, desc->start_padding);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) -
+			desc->start_padding) {
+		HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n",
+			desc->name, desc->queues_num, desc->start_padding, desc->end_padding);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	max_idx_from_zero = idx_by_payload ? desc->queue_entries :
+		desc->queue_entries * payload_size_u32;
+	if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) {
+		HWFNC_ERR("%s txq_idx start:%lu by_payload:%s q_entries:%d will overflow txq_idx\n",
+			desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false",
+			desc->queue_entries);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	HWFNC_DBG_INIT("%s: start_p=%lu end_p=%lu txq_idx_start:%lu txq_idx_by_payload:%s\n",
+		desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start,
+		idx_by_payload ? "true" : "false");
+
+exit:
+	return ret;
+}
+
 static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
 	struct hw_fence_client_type_desc *desc)
 {
@@ -600,7 +685,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
 	u32 queue_size;
 	int ret;
 
-	/* parse client queue property from device-tree */
+	/* parse client queue properties from device-tree */
 	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
 	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4);
 	if (ret) {
@@ -626,6 +711,13 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
 		return -EINVAL;
 	}
 
+	/* parse extra client queue properties from device-tree */
+	ret = _parse_client_queue_dt_props_extra(drv_data, desc);
+	if (ret) {
+		HWFNC_ERR("%s failed to parse extra dt props\n", desc->name);
+		return -EINVAL;
+	}
+
 	/* compute mem_size */
 	if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
 		HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n",
@@ -635,17 +727,18 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
 
 	queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
 	if (queue_size >= ((U32_MAX & PAGE_MASK) -
-		HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) {
-		HWFNC_ERR("%s client queue size:%lu will overflow client queue mem size\n",
-			desc->name, queue_size);
+			(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
+			desc->start_padding + desc->end_padding)) / desc->queues_num) {
+		HWFNC_ERR("%s client queue_sz:%lu start_p:%lu end_p:%lu will overflow mem size\n",
+			desc->name, queue_size, desc->start_padding, desc->end_padding);
 		return -EINVAL;
 	}
 
 	desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
-		(queue_size * desc->queues_num));
+		(queue_size * desc->queues_num) + desc->start_padding + desc->end_padding);
 
 	if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) {
-		HWFNC_ERR("%s client queue mem_size:%lu greater than max client queue size:%lu\n",
+		HWFNC_ERR("%s client queue mem_size:%lu greater than max mem size:%lu\n",
 			desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE);
 		return -EINVAL;
 	}
@@ -690,7 +783,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
 	drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num;
 
 	/* allocate memory for client queue size descriptors */
-	size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_size_desc);
+	size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc);
 	drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL);
 	if (!drv_data->hw_fence_client_queue_size)
 		return -ENOMEM;
@@ -707,9 +800,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
 				hw_fence_utils_get_client_id_priv(drv_data, client_id_ext);
 
 			drv_data->hw_fence_client_queue_size[client_id] =
-				(struct hw_fence_client_queue_size_desc)
-				{desc->queues_num, desc->queue_entries, desc->mem_size,
-				start_offset, desc->skip_txq_wr_idx};
+				(struct hw_fence_client_queue_desc){desc, start_offset};
 			HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n",
 				desc->name, client_id_ext, client_id, start_offset);
 			start_offset += desc->mem_size;
@@ -853,54 +944,6 @@ int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data)
 	return ret;
 }
 
-static int _map_ctl_start(struct hw_fence_driver_data *drv_data, u32 ctl_id,
-	void **iomem_ptr, uint32_t *iomem_size)
-{
-	u32 reg_config[2];
-	void __iomem *ptr;
-	char name[30] = {0};
-	int ret;
-
-	snprintf(name, sizeof(name), "qcom,dpu-ctl-start-%d-reg", ctl_id);
-	ret = of_property_read_u32_array(drv_data->dev->of_node, name, reg_config, 2);
-	if (ret)
-		return 0; /* this is an optional property */
-
-	/* Mmap registers */
-	ptr = devm_ioremap(drv_data->dev, reg_config[0], reg_config[1]);
-	if (!ptr) {
-		HWFNC_ERR("failed to ioremap %s reg\n", name);
-		return -ENOMEM;
-	}
-
-	*iomem_ptr = ptr;
-	*iomem_size = reg_config[1];
-
-	HWFNC_DBG_INIT("mapped ctl_start ctl_id:%d name:%s address:0x%x size:0x%x io_mem:0x%pK\n",
-		ctl_id, name, reg_config[0], reg_config[1], ptr);
-
-	return 0;
-}
-
-int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data)
-{
-	u32 ctl_id = HW_FENCE_LOOPBACK_DPU_CTL_0;
-
-	for (; ctl_id <= HW_FENCE_LOOPBACK_DPU_CTL_5; ctl_id++) {
-		if (_map_ctl_start(drv_data, ctl_id, &drv_data->ctl_start_ptr[ctl_id],
-			&drv_data->ctl_start_size[ctl_id])) {
-			HWFNC_ERR("cannot map ctl_start ctl_id:%d\n", ctl_id);
-		} else {
-			if (drv_data->ctl_start_ptr[ctl_id])
-				HWFNC_DBG_INIT("mapped ctl_id:%d ctl_start_ptr:0x%pK size:%u\n",
-					ctl_id, drv_data->ctl_start_ptr[ctl_id],
-					drv_data->ctl_start_size[ctl_id]);
-		}
-	}
-
-	return 0;
-}
-
 enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
 	enum hw_fence_client_id client_id)
 {
@@ -929,10 +972,13 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver
 	return client_id_priv;
 }
 
-bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id)
+int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id)
 {
-	if (!drv_data || client_id >= drv_data->clients_num)
-		return false;
+	if (!drv_data || client_id >= drv_data->clients_num ||
+			!drv_data->hw_fence_client_queue_size[client_id].type) {
+		HWFNC_ERR("invalid access to client:%d queues_num\n", client_id);
+		return 0;
+	}
 
-	return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx;
+	return drv_data->hw_fence_client_queue_size[client_id].type->queues_num;
 }

+ 6 - 71
hw_fence/src/hw_fence_ioctl.c

@@ -22,12 +22,8 @@
 #define HW_SYNC_IOC_UNREG_CLIENT	_IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long)
 #define HW_SYNC_IOC_CREATE_FENCE	_IOWR(HW_SYNC_IOC_MAGIC, 12,\
 						struct hw_fence_sync_create_data)
-#define HW_SYNC_IOC_DESTROY_FENCE	_IOWR(HW_SYNC_IOC_MAGIC, 13,\
-						struct hw_fence_sync_create_data)
 #define HW_SYNC_IOC_CREATE_FENCE_ARRAY	_IOWR(HW_SYNC_IOC_MAGIC, 14,\
 						struct hw_fence_array_sync_create_data)
-#define HW_SYNC_IOC_DESTROY_FENCE_ARRAY	_IOWR(HW_SYNC_IOC_MAGIC, 15,\
-						struct hw_fence_array_sync_create_data)
 #define HW_SYNC_IOC_REG_FOR_WAIT	_IOWR(HW_SYNC_IOC_MAGIC, 16, int)
 #define HW_SYNC_IOC_FENCE_SIGNAL	_IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long)
 #define HW_SYNC_IOC_FENCE_WAIT	_IOWR(HW_SYNC_IOC_MAGIC, 18, int)
@@ -217,8 +213,13 @@ static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long ar
 {
 	int client_id = _get_client_id(obj, arg);
 
-	if (IS_ERR(&client_id))
+	if (IS_ERR(&client_id)) {
 		return client_id;
+	} else if (client_id != obj->client_id) {
+		HWFNC_ERR("deregistering hw-fence client %d with invalid client_id arg:%d\n",
+			obj->client_id, client_id);
+		return -EINVAL;
+	}
 
 	return msm_hw_fence_deregister(obj->client_handle);
 }
@@ -317,35 +318,6 @@ exit:
 	return ret;
 }
 
-static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long arg)
-{
-	int fd;
-	struct hw_dma_fence *fence;
-	struct hw_fence_sync_create_data data;
-
-	if (!_is_valid_client(obj))
-		return -EINVAL;
-
-	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
-		return -EFAULT;
-
-	fd = data.fence;
-	fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd);
-
-	if (!fence) {
-		HWFNC_ERR("fence for fd:%d not found\n", fd);
-		return -EINVAL;
-	}
-
-	/* Decrement the refcount that hw_sync_get_fence increments */
-	dma_fence_put(&fence->base);
-
-	/* To destroy fence */
-	dma_fence_put(&fence->base);
-
-	return 0;
-}
-
 static void _put_child_fences(int i, struct dma_fence **fences)
 {
 	int fence_idx;
@@ -448,41 +420,6 @@ exit:
 	return ret;
 }
 
-static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned long arg)
-{
-	struct dma_fence_array *fence_array;
-	struct dma_fence *fence;
-	struct hw_fence_array_sync_create_data data;
-	int fd;
-
-	if (!_is_valid_client(obj))
-		return -EINVAL;
-
-	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
-		return -EFAULT;
-
-	fd = data.fence_array_fd;
-	fence = (struct dma_fence *)_hw_sync_get_fence(fd);
-	if (!fence) {
-		HWFNC_ERR("Invalid fence fd: %d\n", fd);
-		return -EINVAL;
-	}
-
-	/* Decrement the refcount that hw_sync_get_fence increments */
-	dma_fence_put(fence);
-
-	fence_array = to_dma_fence_array(fence);
-	if (!fence_array) {
-		HWFNC_ERR("Invalid fence array fd: %d\n", fd);
-		return -EINVAL;
-	}
-
-	/* Destroy fence array */
-	dma_fence_put(&fence_array->base);
-
-	return 0;
-}
-
 /*
  * this IOCTL only supports receiving one fence as input-parameter, which can be
  * either a "dma_fence" or a "dma_fence_array", but eventually we would expand
@@ -658,9 +595,7 @@ static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = {
 	HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client),
 	HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client),
 	HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence),
-	HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE, hw_sync_ioctl_destroy_fence),
 	HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array),
-	HW_IOCTL_DEF(HW_SYNC_IOC_DESTROY_FENCE_ARRAY, hw_sync_ioctl_destroy_fence_array),
 	HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait),
 	HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal),
 	HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait),

+ 13 - 12
hw_fence/src/msm_hw_fence.c

@@ -87,25 +87,25 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
 	}
 
 	hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id);
-	if (hw_fence_client->update_rxq &&
-			hw_fence_drv_data->hw_fence_client_queue_size[client_id].queues_num <
-			HW_FENCE_CLIENT_QUEUES) {
-		HWFNC_ERR("Cannot update rx queue for tx queue-only client:%d\n", client_id);
+	hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
+
+	hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id);
+	if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq &&
+			hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) {
+		HWFNC_ERR("client:%d invalid q_num:%lu for updates_rxq:%s\n", client_id,
+			hw_fence_client->queues_num,
+			hw_fence_client->update_rxq ? "true" : "false");
 		ret = -EINVAL;
 		goto error;
 	}
 
-	hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
-	hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data,
-		client_id);
-
 	/* Alloc Client HFI Headers and Queues */
 	ret = hw_fence_alloc_client_resources(hw_fence_drv_data,
 		hw_fence_client, mem_descriptor);
 	if (ret)
 		goto error;
 
-	/* Initialize signal for communication withe FenceCTL */
+	/* Initialize signal for communication with FenceCTL */
 	ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client);
 	if (ret)
 		goto error;
@@ -118,9 +118,10 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
 	if (ret)
 		goto error;
 
-	HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc vid:%d pid:%d\n",
-		hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id,
-		hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid);
+	HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n",
+		hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num,
+		hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid,
+		hw_fence_client->ipc_client_pid);
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	init_waitqueue_head(&hw_fence_client->wait_queue);

+ 5 - 2
hw_fence/src/msm_hw_fence_synx_translation.c

@@ -169,7 +169,9 @@ int synx_hwfence_create(struct synx_session *session, struct synx_create_params
 		return -SYNX_INVALID;
 	}
 
-	if (IS_ERR_OR_NULL(params->h_synx) || (params->flags != SYNX_CREATE_DMA_FENCE) ||
+	if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) ||
+			!(params->flags & SYNX_CREATE_DMA_FENCE) ||
+			(params->flags & SYNX_CREATE_CSL_FENCE) ||
 			IS_ERR_OR_NULL(params->fence)) {
 		HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x fence:0x%pK\n",
 			session->type, params->h_synx, params->flags, params->fence);
@@ -259,7 +261,8 @@ static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params
 
 	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
 			IS_ERR_OR_NULL(params->new_h_synx) ||
-			(params->flags != SYNX_IMPORT_DMA_FENCE) || IS_ERR_OR_NULL(params->fence)) {
+			!(params->flags & SYNX_IMPORT_DMA_FENCE) ||
+			(params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) {
 		HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n",
 			client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx,
 			IS_ERR_OR_NULL(params) ? 0 : params->flags,

+ 103 - 0
mm_module_build.bzl

@@ -0,0 +1,103 @@
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module","ddk_submodule")
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+load("//msm-kernel:target_variants.bzl", "get_all_variants")
+
+def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps):
+    processed_config_srcs = {}
+
+    for config_src_name in config_srcs:
+        config_src = config_srcs[config_src_name]
+
+        if type(config_src) == "list":
+            processed_config_srcs[config_src_name] = {True: config_src}
+        else:
+            processed_config_srcs[config_src_name] = config_src
+
+    module = struct(
+        name = name,
+        path = path,
+        srcs = srcs,
+        config_srcs = processed_config_srcs,
+        config_option = config_option,
+        deps = deps,
+    )
+
+    module_map[name] = module
+
+def _get_config_choices(map, options):
+    choices = []
+    for option in map:
+        choices.extend(map[option].get(option in options,[]))
+    return choices
+
+def _get_kernel_build_options(modules, config_options):
+    all_options = {option: True for option in config_options}
+    all_options = all_options | {module.config_option: True for module in modules if module.config_option}
+    return all_options
+
+def _get_kernel_build_module_srcs(module, options, formatter):
+    srcs = module.srcs + _get_config_choices(module.config_srcs, options)
+    print("-",module.name,",",module.config_option,",srcs =",srcs)
+    module_path = "{}/".format(module.path) if module.path else ""
+    return ["{}{}".format(module_path, formatter(src)) for src in srcs]
+
+def _get_kernel_build_module_deps(module, options, formatter):
+    return [formatter(dep) for dep in module.deps]
+
+def mm_driver_module_entry(hdrs = []):
+    module_map = {}
+
+    def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps =[]):
+        _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps)
+    return struct(
+        register = register,
+        get = module_map.get,
+        hdrs = hdrs,
+        module_map = module_map
+    )
+
+def define_target_variant_modules(target, variant, registry, modules, config_options = []):
+    kernel_build = "{}_{}".format(target, variant)
+    kernel_build_label = "//msm-kernel:{}".format(kernel_build)
+    modules = [registry.get(module_name) for module_name in modules]
+    options = _get_kernel_build_options(modules, config_options)
+    build_print = lambda message : print("{}: {}".format(kernel_build, message))
+    formatter = lambda s : s.replace("%b", kernel_build).replace("%t", target)
+    headers = ["//msm-kernel:all_headers"] + registry.hdrs
+    all_module_rules = []
+
+    for module in modules:
+        rule_name = "{}_{}".format(kernel_build, module.name)
+        module_srcs = _get_kernel_build_module_srcs(module, options, formatter)
+
+        if not module_srcs:
+            continue
+
+        ddk_submodule(
+            name = rule_name,
+            srcs = module_srcs,
+            out = "{}.ko".format(module.name),
+            deps =  headers + _get_kernel_build_module_deps(module, options, formatter),
+            local_defines = options.keys(),
+        )
+        all_module_rules.append(rule_name)
+
+    ddk_module(
+        name = "{}_mm_drivers".format(kernel_build),
+        kernel_build = kernel_build_label,
+        deps = all_module_rules,
+    )
+    copy_to_dist_dir(
+        name = "{}_mm_drivers_dist".format(kernel_build),
+        data = [":{}_mm_drivers".format(kernel_build)],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+        log = "info",
+    )
+
+def define_consolidate_gki_modules(target, registry, modules, config_options = []):
+    for (targets, variant) in get_all_variants():
+            define_target_variant_modules(targets, variant, registry, modules, config_options)

+ 44 - 0
mm_modules.bzl

@@ -0,0 +1,44 @@
+load(":mm_module_build.bzl", "mm_driver_module_entry")
+
+HW_FENCE_PATH = "hw_fence"
+MSM_EXT_DISPLAY_PATH = "msm_ext_display"
+SYNC_FENCE_PATH = "sync_fence"
+
+mm_driver_modules = mm_driver_module_entry([":mm_drivers_headers"])
+module_entry = mm_driver_modules.register
+
+#--------------- MM-DRIVERS MODULES ------------------
+
+module_entry(
+    name = "hw_fence",
+    path = HW_FENCE_PATH + "/src",
+    config_option = "CONFIG_QTI_HW_FENCE",
+    config_srcs = {
+        "CONFIG_DEBUG_FS" : [
+            "hw_fence_ioctl.c",
+        ]
+    },
+    srcs = ["hw_fence_drv_debug.c",
+            "hw_fence_drv_ipc.c",
+            "hw_fence_drv_priv.c",
+            "hw_fence_drv_utils.c",
+            "msm_hw_fence.c",
+            "msm_hw_fence_synx_translation.c"],
+    deps =[
+        "//vendor/qcom/opensource/synx-kernel:synx_headers"
+    ]
+)
+
+module_entry(
+    name = "msm_ext_display",
+    path = MSM_EXT_DISPLAY_PATH + "/src",
+    config_option = "CONFIG_MSM_EXT_DISPLAY",
+    srcs = ["msm_ext_display.c"],
+)
+
+module_entry(
+    name = "sync_fence",
+    path = SYNC_FENCE_PATH + "/src",
+    config_option = "CONFIG_QCOM_SPEC_SYNC",
+    srcs = ["qcom_sync_file.c"],
+)

+ 16 - 0
target.bzl

@@ -0,0 +1,16 @@
+load(":mm_modules.bzl", "mm_driver_modules")
+load(":mm_module_build.bzl", "define_consolidate_gki_modules")
+
+def define_pineapple():
+    define_consolidate_gki_modules(
+        target = "pineapple",
+        registry = mm_driver_modules,
+        modules = [
+            "hw_fence",
+            "msm_ext_display",
+            "sync_fence",
+        ],
+        config_options = [
+            "CONFIG_DEBUG_FS",
+        ],
+)