Browse code

Merge d87e10c6947cd9bd77aba5e782a1af11146c2214 on remote branch

Change-Id: I8cb23adf9fd4f94979a0ae749d3ab79547bb1ee3
Linux Build Service Account, 2 years ago
parent
current commit
b25baff71f
8 files changed, 732 insertions(+), 500 deletions(-)
  1. msm/synx/ipclite.c (+341 -222)
  2. msm/synx/ipclite.h (+145 -192)
  3. msm/synx/ipclite_client.h (+1 -8)
  4. msm/synx/synx.c (+178 -27)
  5. msm/synx/synx_global.c (+37 -43)
  6. msm/synx/synx_global.h (+4 -0)
  7. msm/synx/synx_private.h (+5 -1)
  8. msm/synx/synx_util.c (+21 -7)

+ 341 - 222
msm/synx/ipclite.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
 
@@ -39,11 +39,22 @@ static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem;
 static struct mutex ssr_mutex;
 static struct kobject *sysfs_kobj;
 
-static uint32_t channel_status_info[IPCMEM_NUM_HOSTS];
+static uint32_t enabled_hosts;
+static uint32_t partitions;
 static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
 static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO;
 static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump;
 
+static inline bool is_host_enabled(uint32_t host)
+{
+	return (1U & (enabled_hosts >> host));
+}
+
+static inline bool is_loopback_except_apps(uint32_t h0, uint32_t h1)
+{
+	return (h0 == h1 && h0 != IPCMEM_APPS);
+}
+
 static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...)
 {
 	uint32_t local_index = 0;
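
The two helpers above replace the old per-host channel_status_info[] array with a single bitmask, enabled_hosts, populated from device-tree children (see ipcmem_init() further down). A minimal standalone sketch of the bookkeeping, with illustrative host IDs and a simplified BIT_MASK():

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_MASK(n) (1UL << (n))   /* simplified form of the kernel macro */

    static uint32_t enabled_hosts;

    static inline int is_host_enabled(uint32_t host)
    {
            return 1U & (enabled_hosts >> host);
    }

    int main(void)
    {
            enabled_hosts |= BIT_MASK(0);   /* e.g. IPCMEM_APPS */
            enabled_hosts |= BIT_MASK(2);   /* e.g. IPCMEM_CDSP */
            /* prints "1 0": host 2 configured, host 1 not */
            printf("%d %d\n", is_host_enabled(2), is_host_enabled(1));
            return 0;
    }
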
@@ -83,52 +94,51 @@ static void ipclite_dump_debug_struct(void)
 	pr_info("------------------- Dumping IPCLite Debug Structure -------------------\n");
 
 	for (host = 0; host < IPCMEM_NUM_HOSTS; host++) {
-		if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
-			temp_dbg_struct = (struct ipclite_debug_struct *)
-						(((char *)ipclite_dbg_struct) +
-						(sizeof(*temp_dbg_struct) * host));
-
-			pr_info("---------- Host ID: %d dbg_mem:%p ----------\n",
-					host, temp_dbg_struct);
-			pr_info("Total Signals Sent : %d Total Signals Received : %d\n",
-					temp_dbg_struct->dbg_info_overall.total_numsig_sent,
-					temp_dbg_struct->dbg_info_overall.total_numsig_recv);
-			pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n",
-					temp_dbg_struct->dbg_info_overall.last_sent_host_id,
-					temp_dbg_struct->dbg_info_overall.last_recv_host_id);
-			pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n",
-					temp_dbg_struct->dbg_info_overall.last_sigid_sent,
-					temp_dbg_struct->dbg_info_overall.last_sigid_recv);
-
-			for (i = 0; i < IPCMEM_NUM_HOSTS; i++) {
-				if (ipclite->ipcmem.toc->recovery.configured_core[i]) {
-					pr_info("----------> Host ID : %d Host ID : %d Channel State: %d\n",
-					host, i, ipclite->ipcmem.toc->toc_entry[host][i].status);
-					pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n",
-					temp_dbg_struct->dbg_info_host[i].numsig_sent,
-					temp_dbg_struct->dbg_info_host[i].numsig_recv);
-					pr_info("No. of Interrupts Received : %d\n",
-					temp_dbg_struct->dbg_info_host[i].num_intr);
-					pr_info("TX Write Index : %d TX Read Index : %d\n",
-					temp_dbg_struct->dbg_info_host[i].tx_wr_index,
-					temp_dbg_struct->dbg_info_host[i].tx_rd_index);
-					pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n",
-					temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0],
-					temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]);
-					pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n",
-					temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1],
-					temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]);
-					pr_info("RX Write Index : %d RX Read Index : %d\n",
-					temp_dbg_struct->dbg_info_host[i].rx_wr_index,
-					temp_dbg_struct->dbg_info_host[i].rx_rd_index);
-					pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n",
-					temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0],
-					temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]);
-					pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n",
-					temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1],
-					temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]);
-				}
-			}
+		if (!is_host_enabled(host))
+			continue;
+		temp_dbg_struct = (struct ipclite_debug_struct *)
+					(((char *)ipclite_dbg_struct) +
+					(sizeof(*temp_dbg_struct) * host));
+
+		pr_info("---------- Host ID: %d dbg_mem:%p ----------\n",
+				host, temp_dbg_struct);
+		pr_info("Total Signals Sent : %d Total Signals Received : %d\n",
+				temp_dbg_struct->dbg_info_overall.total_numsig_sent,
+				temp_dbg_struct->dbg_info_overall.total_numsig_recv);
+		pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n",
+				temp_dbg_struct->dbg_info_overall.last_sent_host_id,
+				temp_dbg_struct->dbg_info_overall.last_recv_host_id);
+		pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n",
+				temp_dbg_struct->dbg_info_overall.last_sigid_sent,
+				temp_dbg_struct->dbg_info_overall.last_sigid_recv);
+
+		for (i = 0; i < IPCMEM_NUM_HOSTS; i++) {
+			if (!is_host_enabled(i))
+				continue;
+			pr_info("----------> Host ID : %d Host ID : %d\n", host, i);
+			pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n",
+			temp_dbg_struct->dbg_info_host[i].numsig_sent,
+			temp_dbg_struct->dbg_info_host[i].numsig_recv);
+			pr_info("No. of Interrupts Received : %d\n",
+			temp_dbg_struct->dbg_info_host[i].num_intr);
+			pr_info("TX Write Index : %d TX Read Index : %d\n",
+			temp_dbg_struct->dbg_info_host[i].tx_wr_index,
+			temp_dbg_struct->dbg_info_host[i].tx_rd_index);
+			pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0],
+			temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]);
+			pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1],
+			temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]);
+			pr_info("RX Write Index : %d RX Read Index : %d\n",
+			temp_dbg_struct->dbg_info_host[i].rx_wr_index,
+			temp_dbg_struct->dbg_info_host[i].rx_rd_index);
+			pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0],
+			temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]);
+			pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n",
+			temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1],
+			temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]);
 		}
 	}
 	return;
@@ -178,7 +188,7 @@ static void ipclite_hw_mutex_acquire(void)
 	int32_t ret;
 
 	if (ipclite != NULL) {
-		if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
+		if (!global_atomic_support) {
 			ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
 					HWSPINLOCK_TIMEOUT,
 					&ipclite->ipclite_hw_mutex->flags);
@@ -187,7 +197,7 @@ static void ipclite_hw_mutex_acquire(void)
 				return;
 			}
 
-			ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS;
+			ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS;
 
 			IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n");
 		}
@@ -197,9 +207,8 @@ static void ipclite_hw_mutex_acquire(void)
 static void ipclite_hw_mutex_release(void)
 {
 	if (ipclite != NULL) {
-		if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
-			ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner =
-									IPCMEM_INVALID_HOST;
+		if (!global_atomic_support) {
+			ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
 			hwspin_unlock_irqrestore(ipclite->hwlock,
 				&ipclite->ipclite_hw_mutex->flags);
 			IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n");
@@ -636,6 +645,15 @@ static int ipclite_tx(struct ipclite_channel *channel,
 	unsigned long flags;
 	int ret = 0;
 
+	if (channel->status != ACTIVE) {
+		if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
+			channel->status = ACTIVE;
+		} else {
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Channel not active\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
 	spin_lock_irqsave(&channel->tx_lock, flags);
 	if (ipclite_tx_avail(channel) < dlen) {
 		spin_unlock_irqrestore(&channel->tx_lock, flags);
@@ -656,102 +674,98 @@ static int ipclite_tx(struct ipclite_channel *channel,
 static int ipclite_send_debug_info(int32_t proc_id)
 {
 	int ret = 0;
+	struct ipclite_channel *channel;
 
 	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
 		return -EINVAL;
 	}
+	channel = &ipclite->channel[proc_id];
 
-	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
-		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
-			channel_status_info[proc_id] = CHANNEL_ACTIVE;
+	if (channel->status != ACTIVE) {
+		if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
+			channel->status = ACTIVE;
 		} else {
 			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
-			return -IPCLITE_EINCHAN;
+			return -EOPNOTSUPP;
 		}
 	}
 
-	ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan,
-											NULL);
-	if (ret < IPCLITE_SUCCESS) {
+	ret = mbox_send_message(channel->irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan, NULL);
+	if (ret < 0) {
 		IPCLITE_OS_LOG(IPCLITE_ERR,
 				"Debug Signal sending failed to Core : %d Signal : %d ret : %d\n",
 							proc_id, IPCLITE_DEBUG_SIGNAL, ret);
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
 	IPCLITE_OS_LOG(IPCLITE_DBG,
 				"Debug Signal send completed to core : %d signal : %d ret : %d\n",
 							proc_id, IPCLITE_DEBUG_SIGNAL, ret);
-	return IPCLITE_SUCCESS;
+	return 0;
 }
 
 int ipclite_ssr_update(int32_t proc_id)
 {
 	int ret = 0;
+	struct ipclite_channel *channel;
 
 	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
 		return -EINVAL;
 	}
+	channel = &ipclite->channel[proc_id];
 
-	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
-		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
-			channel_status_info[proc_id] = CHANNEL_ACTIVE;
+	if (channel->status != ACTIVE) {
+		if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
+			channel->status = ACTIVE;
 		} else {
 			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
-			return -IPCLITE_EINCHAN;
+			return -EOPNOTSUPP;
 		}
 	}
 
-	ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan,
-											NULL);
-	if (ret < IPCLITE_SUCCESS) {
+	ret = mbox_send_message(channel->irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, NULL);
+	if (ret < 0) {
 		IPCLITE_OS_LOG(IPCLITE_ERR,
 				"SSR Signal sending failed to Core : %d Signal : %d ret : %d\n",
 							proc_id, IPCLITE_SSR_SIGNAL, ret);
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
 	IPCLITE_OS_LOG(IPCLITE_DBG,
 				"SSR Signal send completed to core : %d signal : %d ret : %d\n",
 							proc_id, IPCLITE_SSR_SIGNAL, ret);
-	return IPCLITE_SUCCESS;
+	return 0;
 }
 
 void ipclite_recover(enum ipcmem_host_type core_id)
 {
-	int ret, i, host, host0, host1;
+	int ret, host, host0, host1;
+	uint32_t p;
 
 	IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id);
 
 	/* verify and reset the hw mutex lock */
-	if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) {
-		ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
+	if (core_id == ipclite->ipcmem.toc_data.host_info->hwlock_owner) {
+		ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
 		hwspin_unlock_raw(ipclite->hwlock);
 		IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n");
 	}
 
 	mutex_lock(&ssr_mutex);
 	/* Set the Global Channel Status to 0 to avoid Race condition */
-	for (i = 0; i < MAX_PARTITION_COUNT; i++) {
-		host0 = ipcmem_toc_partition_entries[i].host0;
-		host1 = ipcmem_toc_partition_entries[i].host1;
-
-		if (host0 == core_id || host1 == core_id) {
+	for (p = 0; p < partitions; p++) {
+		host0 = ipclite->ipcmem.toc_data.partition_entry[p].host0;
+		host1 = ipclite->ipcmem.toc_data.partition_entry[p].host1;
+		if (host0 != core_id && host1 != core_id)
+			continue;
 
-			ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
-				(&(ipclite->ipcmem.toc->toc_entry[host0][host1].status)), 0);
-			ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
-				(&(ipclite->ipcmem.toc->toc_entry[host1][host0].status)), 0);
+		ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
+			(&(ipclite->ipcmem.partition[p]->hdr.status)), 0);
 
-			channel_status_info[core_id] =
-					ipclite->ipcmem.toc->toc_entry[host0][host1].status;
-		}
-		IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host0, host1,
-					ipclite->ipcmem.toc->toc_entry[host0][host1].status);
-		IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host1, host0,
-					ipclite->ipcmem.toc->toc_entry[host1][host0].status);
+		IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n",
+					host0, host1, ipclite->ipcmem.partition[p]->hdr.status);
 	}
 
 	/* Resets the TX/RX queue */
@@ -765,23 +779,19 @@ void ipclite_recover(enum ipcmem_host_type core_id)
 
 	/* Increment the Global Channel Status for APPS and crashed core*/
 	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
-			(&(ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status)));
-	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
-			(&(ipclite->ipcmem.toc->toc_entry[core_id][IPCMEM_APPS].status)));
+					ipclite->channel[core_id].gstatus_ptr);
 
-	channel_status_info[core_id] =
-			ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status;
+	ipclite->channel[core_id].status = *ipclite->channel[core_id].gstatus_ptr;
 
 	/* Update other cores about SSR */
 	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
-		if (host != core_id && ipclite->ipcmem.toc->recovery.configured_core[host]) {
-			ret = ipclite_ssr_update(host);
-			if (ret < IPCLITE_SUCCESS)
-				IPCLITE_OS_LOG(IPCLITE_ERR,
-					"Failed to send SSR update to core : %d\n", host);
-			else
-				IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host);
-		}
+		if (!is_host_enabled(host) || host == core_id)
+			continue;
+		ret = ipclite_ssr_update(host);
+		if (ret < 0)
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send SSR update to core %d\n", host);
+		else
+			IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host);
 	}
 	mutex_unlock(&ssr_mutex);
 
@@ -804,15 +814,6 @@ int ipclite_msg_send(int32_t proc_id, uint64_t data)
 		return -EINVAL;
 	}
 
-	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
-		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
-			channel_status_info[proc_id] = CHANNEL_ACTIVE;
-		} else {
-			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
-			return -IPCLITE_EINCHAN;
-		}
-	}
-
 	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
 								IPCLITE_MSG_SIGNAL);
 
@@ -845,15 +846,6 @@ int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
 		return -EINVAL;
 	}
 
-	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
-		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
-			channel_status_info[proc_id] = CHANNEL_ACTIVE;
-		} else {
-			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
-			return -IPCLITE_EINCHAN;
-		}
-	}
-
 	ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
 									IPCLITE_TEST_SIGNAL);
 
@@ -911,25 +903,67 @@ static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
 	return ret;
 }
 
-static void ipcmem_init(struct ipclite_mem *ipcmem)
+/**
+ * insert_magic_number() - Inserts the magic number in toc header
+ *
+ * Function computes a simple checksum of the contents in toc header
+ * and stores the result in magic_number field in the toc header
+ */
+static void insert_magic_number(void)
 {
-	int host, host0, host1;
-	int i = 0;
+	uint32_t *block = ipclite->ipcmem.mem.virt_base;
+	size_t size = sizeof(struct ipcmem_toc_header) / sizeof(uint32_t);
 
-	ipcmem->toc = ipcmem->mem.virt_base;
-	IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc);
+	for (int i = 1; i < size; i++)
+		block[0] ^= block[i];
+
+	block[0] = ~block[0];
+}
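
Because magic_number is the first word of the header and the whole region is zeroed before init (block[0] is 0 when the XOR starts), the stored value is the complement of the XOR of the remaining header words. A hedged sketch of a consumer-side check, which is not part of this patch:

    /* Hypothetical validation; assumes block[0] held 0 while
     * insert_magic_number() accumulated the XOR. */
    static bool toc_magic_valid(const struct ipcmem_toc_header *hdr)
    {
            const uint32_t *block = (const uint32_t *)hdr;
            size_t size = sizeof(*hdr) / sizeof(uint32_t);
            uint32_t acc = 0;
            size_t i;

            for (i = 1; i < size; i++)
                    acc ^= block[i];

            return hdr->magic_number == (uint32_t)~acc;
    }
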
 
-	ipcmem->toc->hdr.size = IPCMEM_TOC_SIZE;
-	IPCLITE_OS_LOG(IPCLITE_DBG, "toc->hdr.size = %d\n", ipcmem->toc->hdr.size);
+static int32_t setup_toc(struct ipclite_mem *ipcmem)
+{
+	size_t offset = 0;
+	void *virt_base = ipcmem->mem.virt_base;
+	struct ipcmem_offsets *offsets = &ipcmem->toc->offsets;
+	struct ipcmem_toc_data *toc_data = &ipcmem->toc_data;
+
+	/* Setup Offsets */
+	offsets->host_info		= offset += IPCMEM_TOC_VAR_OFFSET;
+	offsets->global_entry		= offset += sizeof(struct ipcmem_host_info);
+	offsets->partition_info		= offset += sizeof(struct ipcmem_partition_entry);
+	offsets->partition_entry	= offset += sizeof(struct ipcmem_partition_info);
+	// offsets->debug		= virt_base + size - 64K;
+	/* Offset to be used for any new structure added in toc (after partition_entry) */
+	// offsets->new_struct	= offset += sizeof(struct ipcmem_partition_entry)*IPCMEM_NUM_HOSTS;
+
+	IPCLITE_OS_LOG(IPCLITE_DBG, "toc_data offsets:");
+	IPCLITE_OS_LOG(IPCLITE_DBG, "host_info = 0x%X", offsets->host_info);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "global_entry = 0x%X", offsets->global_entry);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_info = 0x%X", offsets->partition_info);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_entry = 0x%X", offsets->partition_entry);
+
+	/* Point structures to the appropriate offset in TOC */
+	toc_data->host_info		= ADD_OFFSET(virt_base, offsets->host_info);
+	toc_data->global_entry		= ADD_OFFSET(virt_base, offsets->global_entry);
+	toc_data->partition_info	= ADD_OFFSET(virt_base, offsets->partition_info);
+	toc_data->partition_entry	= ADD_OFFSET(virt_base, offsets->partition_entry);
+
+	return 0;
+}
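
With the structures declared in the ipclite.h hunks below (all uint32_t fields, so no padding is assumed), the running-sum offsets resolve to:

    host_info       = 0x100                 (IPCMEM_TOC_VAR_OFFSET)
    global_entry    = 0x100 + 0x08 = 0x108  (+ sizeof(struct ipcmem_host_info))
    partition_info  = 0x108 + 0x18 = 0x120  (+ sizeof(struct ipcmem_partition_entry))
    partition_entry = 0x120 + 0x08 = 0x128  (+ sizeof(struct ipcmem_partition_info))
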
 
+static void setup_global_partition(struct ipclite_mem *ipcmem, uint32_t base_offset)
+{
 	/*Fill in global partition details*/
-	ipcmem->toc->toc_entry_global = ipcmem_toc_global_partition_entry;
-	ipcmem->global_partition = (struct ipcmem_global_partition *)
-								((char *)ipcmem->mem.virt_base +
-						ipcmem_toc_global_partition_entry.base_offset);
+	ipcmem->toc_data.global_entry->base_offset = base_offset;
+	ipcmem->toc_data.global_entry->size = GLOBAL_PARTITION_SIZE;
+	ipcmem->toc_data.global_entry->flags = GLOBAL_PARTITION_FLAGS;
+	ipcmem->toc_data.global_entry->host0 = IPCMEM_GLOBAL_HOST;
+	ipcmem->toc_data.global_entry->host1 = IPCMEM_GLOBAL_HOST;
+
+	ipcmem->global_partition = ADD_OFFSET(ipcmem->mem.virt_base, base_offset);
 
 	IPCLITE_OS_LOG(IPCLITE_DBG, "base_offset =%x,ipcmem->global_partition = %p\n",
-				ipcmem_toc_global_partition_entry.base_offset,
+				base_offset,
 				ipcmem->global_partition);
 
 	ipcmem->global_partition->hdr = global_partition_hdr;
@@ -938,55 +972,112 @@ static void ipcmem_init(struct ipclite_mem *ipcmem)
 				ipcmem->global_partition->hdr.partition_type,
 				ipcmem->global_partition->hdr.region_offset,
 				ipcmem->global_partition->hdr.region_size);
+}
 
-	/* Fill in each IPCMEM TOC entry from ipcmem_toc_partition_entries config*/
-	for (i = 0; i < MAX_PARTITION_COUNT; i++) {
-		host0 = ipcmem_toc_partition_entries[i].host0;
-		host1 = ipcmem_toc_partition_entries[i].host1;
-		IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1);
+static void update_partition(struct ipclite_mem *ipcmem, uint32_t p)
+{
+	int host0 = ipcmem->toc_data.partition_entry[p].host0;
+	int host1 = ipcmem->toc_data.partition_entry[p].host1;
 
-		ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i];
-		ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i];
+	IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1);
 
-		if (host0 == IPCMEM_APPS && host1 == IPCMEM_APPS) {
-			/* Updating the Global Channel Status for APPS Loopback */
-			ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVE;
-			ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVE;
+	ipcmem->partition[p] = ADD_OFFSET(ipcmem->mem.virt_base,
+					ipcmem->toc_data.partition_entry[p].base_offset);
 
-			/* Updating Local Channel Status */
-			channel_status_info[host1] = ipcmem->toc->toc_entry[host0][host1].status;
+	IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx",
+				p, ipcmem->partition[p],
+				p, ipcmem->toc_data.partition_entry[p].base_offset);
 
-		} else if (host0 == IPCMEM_APPS || host1 == IPCMEM_APPS) {
-			/* Updating the Global Channel Status */
-			ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVATE_IN_PROGRESS;
-			ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVATE_IN_PROGRESS;
+	if (host0 == host1)
+		ipcmem->partition[p]->hdr = loopback_partition_hdr;
+	else
+		ipcmem->partition[p]->hdr = default_partition_hdr;
 
-			/* Updating Local Channel Status */
-			if (host0 == IPCMEM_APPS)
-				host = host1;
-			else if (host1 == IPCMEM_APPS)
-				host = host0;
+	IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d",
+				ipcmem->partition[p]->hdr.type,
+				ipcmem->partition[p]->hdr.desc_offset,
+				ipcmem->partition[p]->hdr.desc_size);
+}
 
-			channel_status_info[host] = ipcmem->toc->toc_entry[host0][host1].status;
+static int32_t setup_partitions(struct ipclite_mem *ipcmem, uint32_t base_offset)
+{
+	uint32_t p, host0, host1;
+	uint32_t num_entry = 0;
+
+	/*Fill in each valid ipcmem partition table entry*/
+	for (host0 = 0; host0 < IPCMEM_NUM_HOSTS; host0++) {
+		if (!is_host_enabled(host0))
+			continue;
+		for (host1 = host0; host1 < IPCMEM_NUM_HOSTS; host1++) {
+			if (!is_host_enabled(host1) || is_loopback_except_apps(host0, host1))
+				continue;
+			ipcmem->toc_data.partition_entry[num_entry].base_offset = base_offset;
+			ipcmem->toc_data.partition_entry[num_entry].size = DEFAULT_PARTITION_SIZE;
+			ipcmem->toc_data.partition_entry[num_entry].flags = DEFAULT_PARTITION_FLAGS;
+			ipcmem->toc_data.partition_entry[num_entry].host0 = host0;
+			ipcmem->toc_data.partition_entry[num_entry].host1 = host1;
+
+			base_offset += DEFAULT_PARTITION_SIZE;
+			num_entry++;
 		}
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "total partitions = %u", num_entry);
 
-		ipcmem->partition[i] = (struct ipcmem_partition *)
-								((char *)ipcmem->mem.virt_base +
-						ipcmem_toc_partition_entries[i].base_offset);
+	ipcmem->partition = kcalloc(num_entry, sizeof(*ipcmem->partition), GFP_KERNEL);
+	if (!ipcmem->partition) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Partition Allocation failed");
+		return -ENOMEM;
+	}
 
-		IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx\n",
-					i, ipcmem->partition[i],
-					i, ipcmem_toc_partition_entries[i].base_offset);
+	/*Update appropriate partition based on partition entries*/
+	for (p = 0; p < num_entry; p++)
+		update_partition(ipcmem, p);
 
-		if (host0 == host1)
-			ipcmem->partition[i]->hdr = loopback_partition_hdr;
-		else
-			ipcmem->partition[i]->hdr = default_partition_hdr;
+	/*Set up info to parse partition entries*/
+	ipcmem->toc_data.partition_info->num_entries = partitions = num_entry;
+	ipcmem->toc_data.partition_info->entry_size = sizeof(struct ipcmem_partition_entry);
+	return 0;
+}
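
The nested loop visits each unordered pair of enabled hosts exactly once (host1 starts at host0) and skips every loopback pair except APPS<->APPS. With the five hosts of the removed static table enabled (APPS, CDSP, CVP, CAM, VPU), that yields C(5,2) = 10 cross-host partitions plus the APPS loopback, i.e. 11 entries, matching the old MAX_PARTITION_COUNT.
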
 
-		IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
-					ipcmem->partition[i]->hdr.type,
-					ipcmem->partition[i]->hdr.desc_offset,
-					ipcmem->partition[i]->hdr.desc_size);
+static int32_t ipcmem_init(struct ipclite_mem *ipcmem, struct device_node *pn)
+{
+	int ret;
+	uint32_t remote_pid;
+	uint32_t host_count = 0;
+	uint32_t gmem_offset = 0;
+	struct device_node *cn;
+
+	for_each_available_child_of_node(pn, cn) {
+		of_property_read_u32(cn, "qcom,remote-pid", &remote_pid);
+		if (remote_pid < IPCMEM_NUM_HOSTS) {
+			enabled_hosts |= BIT_MASK(remote_pid);
+			host_count++;
+		}
+	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "enabled_hosts = 0x%X", enabled_hosts);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "host_count = %u", host_count);
+
+	ipcmem->toc = ipcmem->mem.virt_base;
+	IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc);
+
+	ret = setup_toc(ipcmem);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up toc");
+		return ret;
+	}
+
+	/*Set up host related info*/
+	ipcmem->toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
+	ipcmem->toc_data.host_info->configured_host = enabled_hosts;
+
+	gmem_offset += IPCMEM_TOC_SIZE;
+	setup_global_partition(ipcmem, gmem_offset);
+
+	gmem_offset += GLOBAL_PARTITION_SIZE;
+	ret = setup_partitions(ipcmem, gmem_offset);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up partitions");
+		return ret;
 	}
 
 	/*Making sure all writes for ipcmem configurations are completed*/
@@ -994,6 +1085,7 @@ static void ipcmem_init(struct ipclite_mem *ipcmem)
 
 	ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED;
 	IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n");
+	return 0;
 }
 
 static int ipclite_channel_irq_init(struct device *parent, struct device_node *node,
@@ -1089,8 +1181,22 @@ EXPORT_SYMBOL(get_global_partition_info);
 static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid,
 								int remote_pid)
 {
-	return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base +
-				ipcmem.toc->toc_entry[local_pid][remote_pid].base_offset);
+	uint32_t p;
+	uint32_t found = -1;
+
+	for (p = 0; p < partitions; p++) {
+		if (ipcmem.toc_data.partition_entry[p].host0 == local_pid
+			&& ipcmem.toc_data.partition_entry[p].host1 == remote_pid) {
+			found = p;
+			break;
+		}
+	}
+
+	if (found < partitions)
+		return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base +
+					ipcmem.toc_data.partition_entry[found].base_offset);
+	else
+		return NULL;
 }
 
 static void ipclite_channel_release(struct device *dev)
@@ -1166,9 +1272,13 @@ static int ipclite_channel_init(struct device *parent,
 	}
 	IPCLITE_OS_LOG(IPCLITE_DBG, "rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo);
 
-	partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem,
-						local_pid, remote_pid);
+	partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, local_pid, remote_pid);
 	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_hdr = %p\n", partition_hdr);
+	if (!partition_hdr) {
+		ret = -ENOMEM;
+		goto err_put_dev;
+	}
+
 	descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset);
 	IPCLITE_OS_LOG(IPCLITE_DBG, "descs = %p\n", descs);
 
@@ -1216,6 +1326,7 @@ static int ipclite_channel_init(struct device *parent,
 	ipclite->channel[remote_pid].remote_pid = remote_pid;
 	ipclite->channel[remote_pid].tx_fifo = tx_fifo;
 	ipclite->channel[remote_pid].rx_fifo = rx_fifo;
+	ipclite->channel[remote_pid].gstatus_ptr = &partition_hdr->status;
 
 	spin_lock_init(&ipclite->channel[remote_pid].tx_lock);
 
@@ -1228,12 +1339,19 @@ static int ipclite_channel_init(struct device *parent,
 		}
 	}
 
-	ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE;
+	/* Updating Local & Global Channel Status */
+	if (remote_pid == IPCMEM_APPS) {
+		*ipclite->channel[remote_pid].gstatus_ptr = ACTIVE;
+		ipclite->channel[remote_pid].status = ACTIVE;
+	} else {
+		*ipclite->channel[remote_pid].gstatus_ptr = IN_PROGRESS;
+		ipclite->channel[remote_pid].status = IN_PROGRESS;
+	}
 	IPCLITE_OS_LOG(IPCLITE_DBG, "Channel init completed, ret = %d\n", ret);
 	return ret;
 
 err_put_dev:
-	ipclite->channel[remote_pid].channel_status = 0;
+	ipclite->channel[remote_pid].status = INACTIVE;
 	device_unregister(dev);
 	kfree(dev);
 	return ret;
@@ -1255,9 +1373,9 @@ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj,
 
 	/* Parse the string from Sysfs Interface */
 	ret = kstrtoint(buf, 0, &ipclite_debug_level);
-	if (ret < IPCLITE_SUCCESS) {
+	if (ret < 0) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
 	/* Check if debug structure is initialized */
@@ -1274,14 +1392,13 @@ static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj,
 
 	/* Signal other cores for updating the debug information */
 	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
-		if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
-			ret = ipclite_send_debug_info(host);
-			if (ret < IPCLITE_SUCCESS)
-				IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n",
-											host);
-			else
-				IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
-		}
+		if (!is_host_enabled(host))
+			continue;
+		ret = ipclite_send_debug_info(host);
+		if (ret < 0)
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
+		else
+			IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
 	}
 
 	return count;
@@ -1294,9 +1411,9 @@ static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj,
 
 	/* Parse the string from Sysfs Interface */
 	ret = kstrtoint(buf, 0, &ipclite_debug_control);
-	if (ret < IPCLITE_SUCCESS) {
+	if (ret < 0) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
 	/* Check if debug structures are initialized */
@@ -1313,14 +1430,13 @@ static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj,
 
 	/* Signal other cores for updating the debug information */
 	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
-		if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
-			ret = ipclite_send_debug_info(host);
-			if (ret < IPCLITE_SUCCESS)
-				IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n",
-											host);
-			else
-				IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
-		}
+		if (!is_host_enabled(host))
+			continue;
+		ret = ipclite_send_debug_info(host);
+		if (ret < 0)
+			IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
+		else
+			IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
 	}
 
 	return count;
@@ -1333,9 +1449,9 @@ static ssize_t ipclite_dbg_dump_write(struct kobject *kobj,
 
 	/* Parse the string from Sysfs Interface */
 	ret = kstrtoint(buf, 0, &ipclite_debug_dump);
-	if (ret < IPCLITE_SUCCESS) {
+	if (ret < 0) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
 	/* Check if debug structures are initialized */
@@ -1363,37 +1479,42 @@ struct kobj_attribute sysfs_dbg_dump = __ATTR(ipclite_debug_dump, 0660,
 
 static int ipclite_debug_sysfs_setup(void)
 {
+	int ret = 0;
+
 	/* Creating a directory in /sys/kernel/ */
 	sysfs_kobj = kobject_create_and_add("ipclite", kernel_kobj);
 	if (!sysfs_kobj) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create and add sysfs directory\n");
-		return -IPCLITE_FAILURE;
+		return -ENOMEM;
 	}
 
 	/* Creating sysfs files/interfaces for debug */
-	if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr)) {
+	ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr);
+	if (ret) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug level file\n");
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
-	if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr)) {
+	ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr);
+	if (ret) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug control file\n");
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
-	if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr)) {
+	ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr);
+	if (ret) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug dump file\n");
-		return -IPCLITE_FAILURE;
+		return ret;
 	}
 
-	return IPCLITE_SUCCESS;
+	return ret;
 }
 
 static int ipclite_debug_info_setup(void)
 {
 	/* Setting up the Debug Structures */
 	ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base +
-						ipclite->ipcmem.mem.size) - IPCLITE_DEBUG_SIZE);
+						ipclite->ipcmem.mem.size) - DEBUG_PARTITION_SIZE);
 	if (!ipclite_dbg_info)
 		return -EADDRNOTAVAIL;
 
@@ -1411,11 +1532,11 @@ static int ipclite_debug_info_setup(void)
 		return -EADDRNOTAVAIL;
 
 	IPCLITE_OS_LOG(IPCLITE_DBG, "virtual_base_ptr = %p total_size : %d debug_size : %d\n",
-		ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, IPCLITE_DEBUG_SIZE);
+		ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, DEBUG_PARTITION_SIZE);
 	IPCLITE_OS_LOG(IPCLITE_DBG, "dbg_info : %p dbg_struct : %p dbg_inmem : %p\n",
 					ipclite_dbg_info, ipclite_dbg_struct, ipclite_dbg_inmem);
 
-	return IPCLITE_SUCCESS;
+	return 0;
 }
 
 static int ipclite_probe(struct platform_device *pdev)
@@ -1464,18 +1585,22 @@ static int ipclite_probe(struct platform_device *pdev)
 	mem = &(ipclite->ipcmem.mem);
 	memset(mem->virt_base, 0, mem->size);
 
-	ipcmem_init(&ipclite->ipcmem);
+	ret = ipcmem_init(&ipclite->ipcmem, pn);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up IPCMEM");
+		goto release;
+	}
 
 	/* Set up sysfs for debug  */
 	ret = ipclite_debug_sysfs_setup();
-	if (ret != IPCLITE_SUCCESS) {
+	if (ret) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n");
 		goto release;
 	}
 
 	/* Mapping Debug Memory */
 	ret = ipclite_debug_info_setup();
-	if (ret != IPCLITE_SUCCESS) {
+	if (ret) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n");
 		goto release;
 	}
@@ -1495,15 +1620,12 @@ static int ipclite_probe(struct platform_device *pdev)
 	mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0);
 
 	if (global_atomic_support) {
-		ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
-							GLOBAL_ATOMICS_ENABLED;
-	} else {
-		ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
-							GLOBAL_ATOMICS_DISABLED;
+		ipclite->ipcmem.toc->hdr.feature_mask |= GLOBAL_ATOMIC_SUPPORT_BMSK;
 	}
+	IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n", global_atomic_support);
 
-	IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n",
-		ipclite->ipcmem.toc->ipclite_features.global_atomic_support);
+	/* Should be called after all Global TOC related init is done */
+	insert_magic_number();
 
 	/* hw mutex callbacks */
 	ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire;
@@ -1512,9 +1634,6 @@ static int ipclite_probe(struct platform_device *pdev)
 	/* store to ipclite structure */
 	ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
 
-	/* initialize hwlock owner to invalid host */
-	ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
-
 	/* Update the Global Debug variable for FW cores */
 	ipclite_dbg_info->debug_level = ipclite_debug_level;
 	ipclite_dbg_info->debug_control = ipclite_debug_control;

+ 145 - 192
msm/synx/ipclite.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved..
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved..
  */
 #include <linux/hwspinlock.h>
 #include <linux/module.h>
@@ -14,9 +14,10 @@
 #define ACTIVE_CHANNEL			0x1
 
 #define IPCMEM_TOC_SIZE			(4*1024)
+#define IPCMEM_TOC_VAR_OFFSET	0x100
 #define MAX_CHANNEL_SIGNALS		6
 
-#define MAX_PARTITION_COUNT		11	/*11 partitions other than global partition*/
+#define GLOBAL_ATOMIC_SUPPORT_BMSK 0x1UL
 
 #define IPCLITE_MSG_SIGNAL		0
 #define IPCLITE_MEM_INIT_SIGNAL 1
@@ -26,13 +27,13 @@
 #define IPCLITE_DEBUG_SIGNAL	5
 
 /** Flag definitions for the entries */
-#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION   (0x01)
-#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION  (0x02)
-#define IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION \
-		(IPCMEM_TOC_ENTRY_FLAGS_ENABLE_READ_PROTECTION | \
-		IPCMEM_TOC_ENTRY_FLAGS_ENABLE_WRITE_PROTECTION)
+#define IPCMEM_FLAGS_ENABLE_READ_PROTECTION   (0x01)
+#define IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION  (0x02)
+#define IPCMEM_FLAGS_ENABLE_RW_PROTECTION \
+		(IPCMEM_FLAGS_ENABLE_READ_PROTECTION | \
+		IPCMEM_FLAGS_ENABLE_WRITE_PROTECTION)
 
-#define IPCMEM_TOC_ENTRY_FLAGS_IGNORE_PARTITION         (0x00000004)
+#define IPCMEM_FLAGS_IGNORE_PARTITION         (0x00000004)
 
 /*Hardcoded macro to identify local host on each core*/
 #define LOCAL_HOST		IPCMEM_APPS
@@ -40,13 +41,6 @@
 /* Timeout (ms) for the trylock of remote spinlocks */
 #define HWSPINLOCK_TIMEOUT	1000
 
-#define CHANNEL_INACTIVE		0
-#define CHANNEL_ACTIVATE_IN_PROGRESS    1
-#define CHANNEL_ACTIVE			2
-
-#define CONFIGURED_CORE		1
-
-#define IPCLITE_DEBUG_SIZE			(64 * 1024)
 #define IPCLITE_DEBUG_INFO_SIZE		256
 #define IPCLITE_CORE_DBG_LABEL		"APSS:"
 #define IPCLITE_LOG_MSG_SIZE		100
@@ -55,6 +49,8 @@
 #define IPCLITE_SIGNAL_LABEL_SIZE	10
 #define PREV_INDEX					2
 
+#define ADD_OFFSET(x, y)	((void *)((size_t)x + y))
+
 #define IPCLITE_OS_LOG(__level, __fmt, arg...) \
 	do { \
 		if (ipclite_debug_level & __level) { \
@@ -69,7 +65,18 @@
 		} \
 	} while (0)
 
-/*IPCMEM Structure Definitions*/
+/**
+ * enum ipclite_channel_status - channel status
+ *
+ * INACTIVE             : Channel uninitialized or init failed
+ * IN_PROGRESS          : Channel init passed, awaiting confirmation from remote host
+ * ACTIVE               : Channel init passed in local and remote host, thus active
+ */
+enum ipclite_channel_status {
+	INACTIVE				= 0,
+	IN_PROGRESS				= 1,
+	ACTIVE					= 2,
+};
 
 enum ipclite_debug_level {
 	IPCLITE_ERR  = 0x0001,
@@ -97,6 +104,11 @@ static const char ipclite_dbg_label[][IPCLITE_DBG_LABEL_SIZE] = {
 	[IPCLITE_DBG] = "dbg"
 };
 
+/**
+ * IPCMEM Debug Structure Definitions
+ *  - Present in Local Memory
+ */
+
 struct ipclite_debug_info_host {
 	uint32_t numsig_sent; //no. of signals sent from the core
 	uint32_t numsig_recv; //no. of signals received on the core
@@ -137,61 +149,78 @@ struct ipclite_debug_struct {
 	struct ipclite_debug_info_host dbg_info_host[IPCMEM_NUM_HOSTS];
 };
 
-struct ipclite_features {
-	uint32_t global_atomic_support;
-	uint32_t version_finalised;
-};
-
-struct ipclite_recover {
-	uint32_t global_atomic_hwlock_owner;
-	uint32_t configured_core[IPCMEM_NUM_HOSTS];
-};
+/**
+ * IPCMEM TOC Structure Definitions
+ *  - Present in toc in shared memory
+ */
 
-struct ipcmem_partition_header {
-	uint32_t type;			   /*partition type*/
-	uint32_t desc_offset;      /*descriptor offset*/
-	uint32_t desc_size;        /*descriptor size*/
-	uint32_t fifo0_offset;     /*fifo 0 offset*/
-	uint32_t fifo0_size;       /*fifo 0 size*/
-	uint32_t fifo1_offset;     /*fifo 1 offset*/
-	uint32_t fifo1_size;       /*fifo 1 size*/
+struct ipcmem_host_info {
+	uint32_t hwlock_owner;
+	uint32_t configured_host;
 };
 
-struct ipcmem_toc_entry {
+struct ipcmem_partition_entry {
 	uint32_t base_offset;	/*partition offset from IPCMEM base*/
 	uint32_t size;			/*partition size*/
 	uint32_t flags;			/*partition flags if required*/
 	uint32_t host0;			/*subsystem 0 who can access this partition*/
 	uint32_t host1;			/*subsystem 1 who can access this partition*/
-	uint32_t status;		/*partition active status*/
+	uint32_t reserved;		/*legacy partition active status*/
 };
 
-struct ipcmem_toc_header {
-	uint32_t size;
-	uint32_t init_done;
+struct ipcmem_partition_info {
+	uint32_t num_entries;	/* Number of channel partitions */
+	uint32_t entry_size;	/* Size of partition_entry structure */
 };
 
-struct ipcmem_toc {
-	struct ipcmem_toc_header hdr;
-	struct ipcmem_toc_entry toc_entry_global;
-	struct ipcmem_toc_entry toc_entry[IPCMEM_NUM_HOSTS][IPCMEM_NUM_HOSTS];
-	/* Need to have a better implementation here */
-	/* as ipcmem is 4k and if host number increases */
-	/* it would create problems*/
-	struct ipclite_features ipclite_features;
-	struct ipclite_recover recovery;
+struct ipcmem_offsets {
+	uint32_t host_info;
+	uint32_t global_entry;
+	uint32_t partition_info;
+	uint32_t partition_entry;
+	uint32_t debug;
+	uint32_t reserved;		/*Padded for 64-bit alignment*/
 };
 
-struct ipcmem_region {
-	u64 aux_base;
-	void __iomem *virt_base;
-	uint32_t size;
+/**
+ * Any change in TOC header size can only be accommodated with
+ * major version change, as it is not backward compatible.
+ */
+struct ipcmem_toc_header {
+	uint32_t magic_number;		/*Checksum of TOC*/
+	uint32_t init_done;			/*TOC initialization status*/
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint64_t feature_mask;
+	uint32_t reserved[6];		/*Padded for future use and 64-bit alignment*/
 };
 
-struct ipcmem_partition {
-	struct ipcmem_partition_header hdr;
+/**
+ * struct ipcmem_toc - Table of contents in ipcmem
+ *
+ * @hdr     : Header to check for toc integrity, version and features
+ * @offsets : List of offsetted structures and partition entries
+ *            available in the toc data region (ipcmem_toc_data)
+ */
+struct ipcmem_toc {
+	struct ipcmem_toc_header hdr;
+	struct ipcmem_offsets offsets;
+
+	/* ---------------------------------------
+	 * ipcmem_toc_data @ 256-byte offset
+	 * struct ipcmem_host_info host_info;
+	 * struct ipcmem_partition_entry global_entry;
+	 * struct ipcmem_partition_info partition_info;
+	 * struct ipcmem_partition_entry partition_entry[num_entries];
+	 * ---------------------------------------
+	 */
 };
 
+/**
+ * IPCMEM Partition Structure Definitions
+ *  - Present in partitions in shared memory
+ */
+
 struct global_partition_header {
 	uint32_t partition_type;
 	uint32_t region_offset;
@@ -202,13 +231,55 @@ struct ipcmem_global_partition {
 	struct global_partition_header hdr;
 };
 
+struct ipcmem_partition_header {
+	uint32_t type;			   /*partition type*/
+	uint32_t desc_offset;      /*descriptor offset*/
+	uint32_t desc_size;        /*descriptor size*/
+	uint32_t fifo0_offset;     /*fifo 0 offset*/
+	uint32_t fifo0_size;       /*fifo 0 size*/
+	uint32_t fifo1_offset;     /*fifo 1 offset*/
+	uint32_t fifo1_size;       /*fifo 1 size*/
+	uint32_t status;           /*partition status*/
+};
+
+struct ipcmem_partition {
+	struct ipcmem_partition_header hdr;
+};
+
+/**
+ * IPCMEM Helper Structure Definitions
+ *  - Present in local memory
+ *  - Can have pointers to toc and partitions in shared memory
+ */
+
+/*Pointers to offsetted structures in TOC*/
+struct ipcmem_toc_data {
+	struct ipcmem_host_info *host_info;
+	struct ipcmem_partition_entry *global_entry;
+	struct ipcmem_partition_info *partition_info;
+	struct ipcmem_partition_entry *partition_entry;
+};
+
+struct ipcmem_region {
+	u64 aux_base;
+	void __iomem *virt_base;
+	uint32_t size;
+};
+
 struct ipclite_mem {
 	struct ipcmem_toc *toc;
+	struct ipcmem_toc_data toc_data;
 	struct ipcmem_region mem;
 	struct ipcmem_global_partition *global_partition;
-	struct ipcmem_partition *partition[MAX_PARTITION_COUNT];
+	struct ipcmem_partition **partition;
 };
 
+/**
+ * IPCLite Structure Definitions
+ *  - Present in local memory
+ *  - Can have pointers to partitions in shared memory
+ */
+
 struct ipclite_fifo {
 	uint32_t length;
 
@@ -265,7 +336,8 @@ struct ipclite_channel {
 	uint32_t channel_version;
 	uint32_t version_finalised;
 
-	uint32_t channel_status;
+	uint32_t *gstatus_ptr;
+	uint32_t status;
 };
 
 /*Single structure that defines everything about IPCLite*/
@@ -277,157 +349,36 @@ struct ipclite_info {
 	struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
 };
 
-const struct ipcmem_toc_entry ipcmem_toc_global_partition_entry = {
-	/* Global partition. */
-	  4 * 1024,
-	  128 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_GLOBAL_HOST,
-	  IPCMEM_GLOBAL_HOST,
-};
-
-const struct ipcmem_toc_entry ipcmem_toc_partition_entries[] = {
-	/* Global partition. */
-	/* {
-	 *   4 * 1024,
-	 *   128 * 1024,
-	 *   IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	 *   IPCMEM_GLOBAL_HOST,
-	 *   IPCMEM_GLOBAL_HOST,
-	 * },
-	 */
-
-	/* APPS<->CDSP partition. */
-	{
-	  132 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_APPS,
-	  IPCMEM_CDSP,
-	  CHANNEL_INACTIVE,
-	},
-	/* APPS<->CVP (EVA) partition. */
-	{
-	  164 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_APPS,
-	  IPCMEM_CVP,
-	  CHANNEL_INACTIVE,
-	},
-	/* APPS<->CAM (ICP) partition. */
-	{
-	  196 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_APPS,
-	  IPCMEM_CAM,
-	  CHANNEL_INACTIVE,
-	},
-	/* APPS<->VPU (IRIS) partition. */
-	{
-	  228 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_APPS,
-	  IPCMEM_VPU,
-	  CHANNEL_INACTIVE,
-	},
-	/* CDSP<->CVP (EVA) partition. */
-	{
-	  260 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_CDSP,
-	  IPCMEM_CVP,
-	  CHANNEL_INACTIVE,
-	},
-	/* CDSP<->CAM (ICP) partition. */
-	{
-	  292 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_CDSP,
-	  IPCMEM_CAM,
-	  CHANNEL_INACTIVE,
-	},
-	/* CDSP<->VPU (IRIS) partition. */
-	{
-	  324 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_CDSP,
-	  IPCMEM_VPU,
-	  CHANNEL_INACTIVE,
-	},
-	/* CVP<->CAM (ICP) partition. */
-	{
-	  356 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_CVP,
-	  IPCMEM_CAM,
-	  CHANNEL_INACTIVE,
-	},
-	/* CVP<->VPU (IRIS) partition. */
-	{
-	  388 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_CVP,
-	  IPCMEM_VPU,
-	  CHANNEL_INACTIVE,
-	},
-	/* CAM<->VPU (IRIS) partition. */
-	{
-	  420 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_CAM,
-	  IPCMEM_VPU,
-	  CHANNEL_INACTIVE,
-	},
-	/* APPS<->APPS partition. */
-	{
-	  454 * 1024,
-	  32 * 1024,
-	  IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	  IPCMEM_APPS,
-	  IPCMEM_APPS,
-	  CHANNEL_INACTIVE,
-	}
-	/* Last entry uses invalid hosts and no protections to signify the end. */
-	/* {
-	 *   0,
-	 *   0,
-	 *   IPCMEM_TOC_ENTRY_FLAGS_ENABLE_RW_PROTECTION,
-	 *   IPCMEM_INVALID_HOST,
-	 *   IPCMEM_INVALID_HOST,
-	 * }
-	 */
-};
-
-/*D:wefault partition parameters*/
-#define	DEFAULT_PARTITION_TYPE			0x0
-#define	DEFAULT_PARTITION_HDR_SIZE		1024
+/*Default partition parameters*/
+#define DEFAULT_PARTITION_TYPE			0x0
+#define DEFAULT_PARTITION_STATUS		INACTIVE
+#define DEFAULT_PARTITION_HDR_SIZE		1024
 
-#define	DEFAULT_DESCRIPTOR_OFFSET		1024
-#define	DEFAULT_DESCRIPTOR_SIZE			(3*1024)
+#define DEFAULT_DESCRIPTOR_OFFSET		1024
+#define DEFAULT_DESCRIPTOR_SIZE			(3*1024)
 #define DEFAULT_FIFO0_OFFSET			(4*1024)
 #define DEFAULT_FIFO0_SIZE				(8*1024)
 #define DEFAULT_FIFO1_OFFSET			(12*1024)
 #define DEFAULT_FIFO1_SIZE				(8*1024)
 
+#define DEFAULT_PARTITION_SIZE			(32*1024)
+#define DEFAULT_PARTITION_FLAGS			IPCMEM_FLAGS_ENABLE_RW_PROTECTION
+
 /*Loopback partition parameters*/
-#define	LOOPBACK_PARTITION_TYPE			0x1
+#define LOOPBACK_PARTITION_TYPE			0x1
 
 /*Global partition parameters*/
-#define	GLOBAL_PARTITION_TYPE			0xFF
+#define GLOBAL_PARTITION_TYPE			0xFF
 #define GLOBAL_PARTITION_HDR_SIZE		(4*1024)
 
 #define GLOBAL_REGION_OFFSET			(4*1024)
 #define GLOBAL_REGION_SIZE				(124*1024)
 
+#define GLOBAL_PARTITION_SIZE			(128*1024)
+#define GLOBAL_PARTITION_FLAGS			IPCMEM_FLAGS_ENABLE_RW_PROTECTION
+
+/*Debug partition parameters*/
+#define DEBUG_PARTITION_SIZE			(64*1024)
 
 const struct ipcmem_partition_header default_partition_hdr = {
 	DEFAULT_PARTITION_TYPE,
@@ -437,6 +388,7 @@ const struct ipcmem_partition_header default_partition_hdr = {
 	DEFAULT_FIFO0_SIZE,
 	DEFAULT_FIFO1_OFFSET,
 	DEFAULT_FIFO1_SIZE,
+	DEFAULT_PARTITION_STATUS,
 };
 
 /* TX and RX FIFO point to same location for such loopback partition type
@@ -450,6 +402,7 @@ const struct ipcmem_partition_header loopback_partition_hdr = {
 	DEFAULT_FIFO0_SIZE,
 	DEFAULT_FIFO0_OFFSET,
 	DEFAULT_FIFO0_SIZE,
+	DEFAULT_PARTITION_STATUS,
 };
 
 const struct global_partition_header global_partition_hdr = {

+ 1 - 8
msm/synx/ipclite_client.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef __IPCLITE_CLIENT_H__
 #define __IPCLITE_CLIENT_H__
@@ -27,13 +27,6 @@ enum ipcmem_host_type {
 	IPCMEM_INVALID_HOST =  0xFF,				  /**< Invalid processor */
 };
 
-/**
- * IPCLite return codes
- */
-#define IPCLITE_SUCCESS		0 /*< Success > */
-#define IPCLITE_FAILURE		1 /*< Failure > */
-#define IPCLITE_EINCHAN		9 /*< Inactive Channel */
-
 struct global_region_info {
 	void *virt_base;
 	uint32_t size;

+ 178 - 27
msm/synx/synx.c

@@ -483,6 +483,11 @@ int synx_native_signal_fence(struct synx_coredata *synx_obj,
 		return -SYNX_ALREADY;
 	}
 
+	synx_obj->status = status;
+
+	if (status >= SYNX_DMA_FENCE_STATE_MAX)
+		status = SYNX_DMA_FENCE_STATE_MAX - 1;
+
 	/* set fence error to model {signal w/ error} */
 	if (status != SYNX_STATE_SIGNALED_SUCCESS)
 		dma_fence_set_error(synx_obj->fence, -status);
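
Note the ordering above: the unclamped value is saved to synx_obj->status first, so a custom status at or above SYNX_DMA_FENCE_STATE_MAX is recorded verbatim in the coredata, while the value reported to the dma-fence is clamped to SYNX_DMA_FENCE_STATE_MAX - 1.
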
@@ -514,6 +519,7 @@ int synx_native_signal_merged_fence(struct synx_coredata *synx_obj, u32 status)
 			rc = -SYNX_NOENT;
 			goto fail;
 		}
+
 		mutex_lock(&synx_child_obj[i]->obj_lock);
 		spin_lock_irqsave(synx_child_obj[i]->fence->lock, flags);
 		if (synx_util_get_object_status_locked(synx_child_obj[i]) != SYNX_STATE_ACTIVE ||
@@ -525,6 +531,7 @@ int synx_native_signal_merged_fence(struct synx_coredata *synx_obj, u32 status)
 		}
 		spin_unlock_irqrestore(synx_child_obj[i]->fence->lock, flags);
 
+		status = synx_global_get_status(synx_child_obj[i]->global_idx);
 		rc = synx_native_signal_fence(synx_child_obj[i], status);
 		mutex_unlock(&synx_child_obj[i]->obj_lock);
 	}
@@ -533,6 +540,80 @@ fail:
 	return rc;
 }
 
+u32 synx_get_child_status(struct synx_coredata *synx_obj)
+{
+	u32 h_child = 0, i = 0;
+	u32 status = SYNX_DMA_FENCE_STATE_MAX - 1, child_status = SYNX_STATE_ACTIVE;
+	struct dma_fence_array *array = NULL;
+	struct synx_map_entry *fence_entry = NULL;
+	struct synx_coredata *synx_child_obj = NULL;
+
+	if (!dma_fence_is_array(synx_obj->fence))
+		return status;
+
+	array = to_dma_fence_array(synx_obj->fence);
+	if (IS_ERR_OR_NULL(array))
+		goto bail;
+
+	for (i = 0; i < array->num_fences; i++) {
+		h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
+		if (h_child == 0)
+			h_child = synx_util_get_fence_entry((u64)array->fences[i], 0);
+
+		if (h_child == 0)
+			continue;
+
+		fence_entry = synx_util_get_map_entry(h_child);
+		if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj)) {
+			dprintk(SYNX_ERR, "Invalid handle access %u", h_child);
+			goto bail;
+		}
+		synx_child_obj = fence_entry->synx_obj;
+
+		mutex_lock(&synx_child_obj->obj_lock);
+		if (synx_util_is_global_object(synx_child_obj))
+			child_status = synx_global_get_status(synx_child_obj->global_idx);
+		else
+			child_status = synx_child_obj->status;
+		mutex_unlock(&synx_child_obj->obj_lock);
+		synx_util_release_map_entry(fence_entry);
+
+		dprintk(SYNX_VERB, "Child handle %u status %d", h_child, child_status);
+		if (child_status != SYNX_STATE_ACTIVE &&
+			(status == SYNX_DMA_FENCE_STATE_MAX - 1 ||
+			(child_status > SYNX_STATE_SIGNALED_SUCCESS &&
+			child_status <= SYNX_STATE_SIGNALED_MAX)))
+			status = child_status;
+	}
+bail:
+	return status;
+}
+
+u32 synx_custom_get_status(struct synx_coredata *synx_obj, u32 status)
+{
+	u32 custom_status = status;
+	u32 parent_global_status =
+		synx_util_is_global_object(synx_obj) ?
+		synx_global_get_status(synx_obj->global_idx) : SYNX_STATE_ACTIVE;
+
+	if (IS_ERR_OR_NULL(synx_obj))
+		goto bail;
+
+	mutex_lock(&synx_obj->obj_lock);
+	if (synx_util_is_merged_object(synx_obj)) {
+		if (parent_global_status == SYNX_STATE_ACTIVE)
+			synx_obj->status = synx_get_child_status(synx_obj);
+		else
+			synx_obj->status = parent_global_status;
+	}
+
+	custom_status = synx_obj->status;
+	mutex_unlock(&synx_obj->obj_lock);
+
+bail:
+	return custom_status;
+}
+
 void synx_signal_handler(struct work_struct *cb_dispatch)
 {
 	int rc = SYNX_SUCCESS;
@@ -544,6 +625,13 @@ void synx_signal_handler(struct work_struct *cb_dispatch)
 	u32 h_synx = signal_cb->handle;
 	u32 status = signal_cb->status;
 
+	if (signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) {
+		status = synx_custom_get_status(synx_obj, status);
+		dprintk(SYNX_VERB,
+			"handle %d will be updated with status %d\n",
+			h_synx, status);
+	}
+
 	if ((signal_cb->flag & SYNX_SIGNAL_FROM_FENCE) &&
 			(synx_util_is_global_handle(h_synx) ||
 			synx_util_is_global_object(synx_obj))) {
@@ -590,8 +678,8 @@ void synx_signal_handler(struct work_struct *cb_dispatch)
 	}
 
 	mutex_lock(&synx_obj->obj_lock);
-
-	if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC) {
+	if (signal_cb->flag & SYNX_SIGNAL_FROM_IPC &&
+		synx_util_get_object_status(synx_obj) == SYNX_STATE_ACTIVE) {
 		if (synx_util_is_merged_object(synx_obj))
 			rc = synx_native_signal_merged_fence(synx_obj, status);
 		else
@@ -648,8 +736,12 @@ void synx_fence_callback(struct dma_fence *fence,
 	 */
 	if (status == 1)
 		status = SYNX_STATE_SIGNALED_SUCCESS;
-	else if (status < 0)
+	else if (status == -SYNX_STATE_SIGNALED_CANCEL)
+		status = SYNX_STATE_SIGNALED_CANCEL;
+	else if (status < 0 && status >= -SYNX_STATE_SIGNALED_MAX)
 		status = SYNX_STATE_SIGNALED_EXTERNAL;
+	else
+		status = (u32)-status;
 
 	signal_cb->status = status;
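
In summary, the dma-fence callback now maps: 1 to SYNX_STATE_SIGNALED_SUCCESS; -SYNX_STATE_SIGNALED_CANCEL to SYNX_STATE_SIGNALED_CANCEL; any other negative error within -SYNX_STATE_SIGNALED_MAX to SYNX_STATE_SIGNALED_EXTERNAL; and anything more negative is passed through as the custom status (u32)-status.
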
 
@@ -707,7 +799,10 @@ int synx_signal(struct synx_session *session, u32 h_synx, u32 status)
 	if (IS_ERR_OR_NULL(client))
 		return -SYNX_INVALID;
 
-	if (status <= SYNX_STATE_ACTIVE) {
+	if (status <= SYNX_STATE_ACTIVE ||
+			!(status == SYNX_STATE_SIGNALED_SUCCESS ||
+			status == SYNX_STATE_SIGNALED_CANCEL ||
+			status > SYNX_STATE_SIGNALED_MAX)) {
 		dprintk(SYNX_ERR,
 			"[sess :%llu] signaling with wrong status: %u\n",
 			client->id, status);
@@ -727,7 +822,6 @@ int synx_signal(struct synx_session *session, u32 h_synx, u32 status)
 	}
 
 	mutex_lock(&synx_obj->obj_lock);
-
 	if (synx_util_is_global_handle(h_synx) ||
 			synx_util_is_global_object(synx_obj))
 		rc = synx_global_update_status(
@@ -788,6 +882,52 @@ static int synx_match_payload(struct synx_kernel_payload *cb_payload,
 	return rc;
 }
 
+/* Timer Callback function. This will be called when timer expires */
+void synx_timer_cb(struct timer_list *data)
+{
+	struct synx_client *client;
+	struct synx_handle_coredata *synx_data;
+	struct synx_coredata *synx_obj;
+	struct synx_cb_data *synx_cb = container_of(data, struct synx_cb_data, synx_timer);
+
+	client = synx_get_client(synx_cb->session);
+	if (IS_ERR_OR_NULL(client)) {
+		dprintk(SYNX_ERR,
+			"invalid session data 0x%x in cb payload\n",
+			synx_cb->session);
+		return;
+	}
+	synx_data = synx_util_acquire_handle(client, synx_cb->h_synx);
+	synx_obj = synx_util_obtain_object(synx_data);
+	if (IS_ERR_OR_NULL(synx_obj)) {
+		dprintk(SYNX_ERR,
+			"[sess :0x%llx] invalid handle access 0x%x\n",
+			synx_cb->session, synx_cb->h_synx);
+		return;
+	}
+	dprintk(SYNX_VERB,
+		"Timer expired for synx_cb 0x%x timeout 0x%llx. Deleting the timer.\n",
+		synx_cb, synx_cb->timeout);
+
+	synx_cb->status = SYNX_STATE_TIMEOUT;
+	del_timer(&synx_cb->synx_timer);
+	list_del_init(&synx_cb->node);
+	queue_work(synx_dev->wq_cb, &synx_cb->cb_dispatch);
+}
+
+static int synx_start_timer(struct synx_cb_data *synx_cb)
+{
+	int rc = 0;
+
+	timer_setup(&synx_cb->synx_timer, synx_timer_cb, 0);
+	rc = mod_timer(&synx_cb->synx_timer, jiffies + msecs_to_jiffies(synx_cb->timeout));
+	dprintk(SYNX_VERB,
+		"Timer started for synx_cb 0x%x timeout 0x%llx\n",
+		synx_cb, synx_cb->timeout);
+	return rc;
+}
+
+
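
synx_timer_cb recovers the enclosing synx_cb_data from the embedded timer_list via container_of. A standalone illustration of that pattern, in userspace C with simplified stand-in types rather than the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer_list { int armed; };          /* stand-in for the kernel type */

    struct cb_data {                           /* simplified synx_cb_data */
            int h_synx;
            struct timer_list synx_timer;      /* embedded, as in the patch */
    };

    static void timer_cb(struct timer_list *t)
    {
            struct cb_data *cb = container_of(t, struct cb_data, synx_timer);
            printf("timeout for handle %d\n", cb->h_synx);
    }

    int main(void)
    {
            struct cb_data cb = { .h_synx = 42 };
            timer_cb(&cb.synx_timer);          /* simulate expiry: handle 42 */
            return 0;
    }
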
 int synx_async_wait(struct synx_session *session,
 	struct synx_callback_params *params)
 {
@@ -803,9 +943,6 @@ int synx_async_wait(struct synx_session *session,
 	if (IS_ERR_OR_NULL(session) || IS_ERR_OR_NULL(params))
 		return -SYNX_INVALID;
 
-	if (params->timeout_ms != SYNX_NO_TIMEOUT)
-		return -SYNX_NOSUPPORT;
-
 	client = synx_get_client(session);
 	if (IS_ERR_OR_NULL(client))
 		return -SYNX_INVALID;
@@ -858,6 +995,8 @@ int synx_async_wait(struct synx_session *session,
 
 	synx_cb->session = session;
 	synx_cb->idx = idx;
+	synx_cb->h_synx = params->h_synx;
+
 	INIT_WORK(&synx_cb->cb_dispatch, synx_util_cb_dispatch);
 
 	/* add callback if object still ACTIVE, dispatch if SIGNALED */
@@ -865,6 +1004,17 @@ int synx_async_wait(struct synx_session *session,
 		dprintk(SYNX_VERB,
 			"[sess :%llu] callback added for handle %u\n",
 			client->id, params->h_synx);
+		synx_cb->timeout = params->timeout_ms;
+		if (params->timeout_ms != SYNX_NO_TIMEOUT) {
+			rc = synx_start_timer(synx_cb);
+			if (rc != SYNX_SUCCESS) {
+				dprintk(SYNX_ERR,
+					"[sess :%llu] timer start failed - synx_cb: 0x%x, params->timeout_ms: 0x%llx, handle: 0x%x, ret : %d\n",
+					client->id, synx_cb, params->timeout_ms,
+					params->h_synx, rc);
+				goto release;
+			}
+		}
 		list_add(&synx_cb->node, &synx_obj->reg_cbs_list);
 	} else {
 		synx_cb->status = status;
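With the SYNX_NOSUPPORT early-return removed, a finite timeout_ms is now honored end to end: if the timer fires before the object signals, the callback is dispatched with SYNX_STATE_TIMEOUT. A hedged usage sketch follows; only h_synx and timeout_ms are confirmed by this diff, while the cb_func/payload field names and the callback signature are assumptions to be checked against the public synx header:

/* Illustrative caller; field names beyond h_synx/timeout_ms assumed */
static void demo_synx_cb(u32 h_synx, int status, void *payload)
{
	if (status == SYNX_STATE_TIMEOUT)
		pr_warn("wait on handle %u timed out\n", h_synx);
}

static int demo_wait(struct synx_session *session, u32 h_synx)
{
	struct synx_callback_params params = {
		.h_synx     = h_synx,
		.cb_func    = demo_synx_cb,	/* assumed field name */
		.payload    = NULL,		/* assumed field name */
		.timeout_ms = 500,		/* finite timeout now allowed */
	};

	return synx_async_wait(session, &params);
}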
@@ -930,7 +1080,7 @@ int synx_cancel_async_wait(
 	status = synx_util_get_object_status(synx_obj);
 	if (status != SYNX_STATE_ACTIVE) {
 		dprintk(SYNX_ERR,
-			"handle %u already signaled cannot cancel\n",
+			"handle %u already signaled or timed out, cannot cancel\n",
 			params->h_synx);
 		rc = -SYNX_INVALID;
 		goto release;
@@ -958,6 +1108,12 @@ int synx_cancel_async_wait(
 
 		cb_payload = &client->cb_table[synx_cb->idx];
 		ret = synx_match_payload(&cb_payload->kernel_cb, &payload);
+		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
+			dprintk(SYNX_VERB,
+				"Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
+				synx_cb, synx_cb->timeout);
+			del_timer(&synx_cb->synx_timer);
+		}
 		switch (ret) {
 		case 1:
 			/* queue the cancel cb work */
@@ -997,7 +1153,7 @@ EXPORT_SYMBOL(synx_cancel_async_wait);
 int synx_merge(struct synx_session *session,
 	struct synx_merge_params *params)
 {
-	int rc, i, num_signaled = 0;
+	int rc = SYNX_SUCCESS, i, num_signaled = 0;
 	u32 count = 0, h_child, status = SYNX_STATE_ACTIVE;
 	u32 *h_child_list = NULL, *h_child_idx_list = NULL;
 	struct synx_client *client;
@@ -1073,6 +1229,7 @@ int synx_merge(struct synx_session *session,
 
 	h_child_idx_list = kzalloc(count*4, GFP_KERNEL);
 	if (IS_ERR_OR_NULL(h_child_idx_list)) {
+		kfree(h_child_list);
 		rc = -SYNX_NOMEM;
 		goto clear;
 	}
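The added kfree() closes a leak: when the second allocation fails, the first buffer was previously orphaned because the clear label never freed it. (kcalloc(count, sizeof(u32), GFP_KERNEL) would also document the intent better than the bare count*4.) The general shape of the fix, as a sketch:

/* Sketch: each failure path frees exactly what was acquired before it */
static int demo_alloc_pair(u32 count, u32 **list_a, u32 **list_b)
{
	*list_a = kcalloc(count, sizeof(u32), GFP_KERNEL);
	if (!*list_a)
		return -ENOMEM;

	*list_b = kcalloc(count, sizeof(u32), GFP_KERNEL);
	if (!*list_b) {
		kfree(*list_a);		/* undo the first allocation */
		*list_a = NULL;
		return -ENOMEM;
	}
	return 0;
}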
@@ -1110,10 +1267,14 @@ int synx_merge(struct synx_session *session,
 						client->id, h_child_list[i]);
 					continue;
 				}
-
-				rc = synx_native_signal_fence(synx_obj_child, status);
+				mutex_lock(&synx_obj_child->obj_lock);
+				if (synx_obj->status == SYNX_STATE_ACTIVE)
+					rc = synx_native_signal_fence(synx_obj_child, status);
+				mutex_unlock(&synx_obj_child->obj_lock);
 				if (rc != SYNX_SUCCESS)
 					dprintk(SYNX_ERR, "h_synx %u failed with status %d\n", h_child_list[i], rc);
+
+				synx_util_release_handle(synx_data_child);
 			}
 		}
 	}
@@ -1361,7 +1522,7 @@ EXPORT_SYMBOL(synx_bind);
 int synx_get_status(struct synx_session *session,
 	u32 h_synx)
 {
-	int rc = 0;
+	int rc = 0, status = 0;
 	struct synx_client *client;
 	struct synx_handle_coredata *synx_data;
 	struct synx_coredata *synx_obj;
@@ -1381,23 +1542,13 @@ int synx_get_status(struct synx_session *session,
 		goto fail;
 	}
 
-	if (synx_util_is_global_handle(h_synx)) {
-		rc = synx_global_get_status(
-				synx_util_global_idx(h_synx));
-		if (rc != SYNX_STATE_ACTIVE) {
-			dprintk(SYNX_VERB,
-				"[sess :%llu] handle %u in status %d\n",
-				client->id, h_synx, rc);
-			goto fail;
-		}
-	}
-
 	mutex_lock(&synx_obj->obj_lock);
-	rc = synx_util_get_object_status(synx_obj);
+	status = synx_util_get_object_status(synx_obj);
+	rc = synx_obj->status;
 	mutex_unlock(&synx_obj->obj_lock);
 	dprintk(SYNX_VERB,
-		"[sess :%llu] handle %u status %d\n",
-		client->id, h_synx, rc);
+		"[sess :%llu] handle %u synx coredata status %d and dma fence status %d\n",
+		client->id, h_synx, rc, status);
 
 fail:
 	synx_util_release_handle(synx_data);

+ 37 - 43
msm/synx/synx_global.c

@@ -460,7 +460,7 @@ u32 synx_global_get_status(u32 idx)
 {
 	int rc;
 	unsigned long flags;
-	u32 status;
+	u32 status = SYNX_STATE_ACTIVE;
 	struct synx_global_coredata *synx_g_obj;
 
 	if (!synx_gmem.table)
@@ -473,7 +473,8 @@ u32 synx_global_get_status(u32 idx)
 	if (rc)
 		return rc;
 	synx_g_obj = &synx_gmem.table[idx];
-	status = synx_g_obj->status;
+	if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child == 0)
+		status = synx_g_obj->status;
 	synx_gmem_unlock(idx, &flags);
 
 	return status;
@@ -500,8 +501,10 @@ u32 synx_global_test_status_set_wait(u32 idx,
 	synx_global_print_data(synx_g_obj, __func__);
 	status = synx_g_obj->status;
 	/* if handle is still ACTIVE */
-	if (status == SYNX_STATE_ACTIVE)
+	if (status == SYNX_STATE_ACTIVE || synx_g_obj->num_child != 0) {
 		synx_g_obj->waiters |= (1UL << id);
+		status = SYNX_STATE_ACTIVE;
+	}
 	else
 		dprintk(SYNX_DBG, "handle %u already signaled %u",
 			synx_g_obj->handle, synx_g_obj->status);
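After this change, a merged handle with children still pending reports ACTIVE to prospective waiters, so registration keeps working until the last child checks in. The essential discipline is that the status test and the waiter-bit update form one critical section; the sketch below uses an ordinary spinlock as a stand-in for the shared-memory lock that the real synx_gmem_lock()/synx_gmem_unlock() pair wraps:

#include <linux/spinlock.h>

/* Check-then-register under one lock; a signal racing in between
 * would otherwise be lost.
 */
struct demo_entry {
	spinlock_t lock;
	u32 status;
	u32 num_child;
	u32 waiters;
};

static u32 demo_register_waiter(struct demo_entry *e, u32 core_id)
{
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&e->lock, flags);
	status = e->status;
	if (status == SYNX_STATE_ACTIVE || e->num_child != 0) {
		e->waiters |= (1UL << core_id);
		status = SYNX_STATE_ACTIVE;	/* pending children => ACTIVE */
	}
	spin_unlock_irqrestore(&e->lock, flags);
	return status;
}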
@@ -533,21 +536,17 @@ static int synx_global_update_status_core(u32 idx,
 	if (synx_g_obj->num_child != 0) {
 		/* composite handle */
 		synx_g_obj->num_child--;
+		if (synx_g_obj->status == SYNX_STATE_ACTIVE ||
+			(status > SYNX_STATE_SIGNALED_SUCCESS &&
+			status <= SYNX_STATE_SIGNALED_MAX))
+			synx_g_obj->status = status;
+
 		if (synx_g_obj->num_child == 0) {
-			if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
-				synx_g_obj->status =
-					(status == SYNX_STATE_SIGNALED_SUCCESS) ?
-					SYNX_STATE_SIGNALED_SUCCESS : SYNX_STATE_SIGNALED_ERROR;
-				data |= synx_g_obj->status;
-				synx_global_get_waiting_cores_locked(synx_g_obj,
-					wait_cores);
-				synx_global_get_parents_locked(synx_g_obj, h_parents);
-			} else {
-				data = 0;
-				dprintk(SYNX_WARN,
-					"merged handle %u already in state %u\n",
-					synx_g_obj->handle, synx_g_obj->status);
-			}
+			data |= synx_g_obj->status;
+			synx_global_get_waiting_cores_locked(synx_g_obj,
+				wait_cores);
+			synx_global_get_parents_locked(synx_g_obj, h_parents);
+
 			/* release ref held by constituting handles */
 			synx_g_obj->refcount--;
 			if (synx_g_obj->refcount == 0) {
@@ -555,15 +554,6 @@ static int synx_global_update_status_core(u32 idx,
 					sizeof(*synx_g_obj));
 				clear = true;
 			}
-		} else if (status != SYNX_STATE_SIGNALED_SUCCESS) {
-			synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
-			data |= synx_g_obj->status;
-			synx_global_get_waiting_cores_locked(synx_g_obj,
-				wait_cores);
-			synx_global_get_parents_locked(synx_g_obj, h_parents);
-			dprintk(SYNX_WARN,
-				"merged handle %u signaled with error state\n",
-				synx_g_obj->handle);
 		} else {
 			/* pending notification from handles */
 			data = 0;
@@ -723,8 +713,8 @@ int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
 	struct synx_global_coredata *synx_g_obj;
 	u32 i, j = 0;
 	u32 idx;
-	bool sig_error = false;
 	u32 num_child = 0;
+	u32 parent_status = SYNX_STATE_ACTIVE;
 
 	if (!synx_gmem.table)
 		return -SYNX_NOMEM;
@@ -746,18 +736,26 @@ int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
 			goto fail;
 
 		synx_g_obj = &synx_gmem.table[idx];
-		if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
-			for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
-				if (synx_g_obj->parents[i] == 0) {
-					synx_g_obj->parents[i] = p_idx;
-					break;
-				}
+		for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
+			if (synx_g_obj->parents[i] == 0) {
+				synx_g_obj->parents[i] = p_idx;
+				break;
 			}
-			num_child++;
-		} else if (synx_g_obj->status >
-			SYNX_STATE_SIGNALED_SUCCESS) {
-			sig_error = true;
 		}
+		if (synx_g_obj->status == SYNX_STATE_ACTIVE)
+			num_child++;
+		else if (synx_g_obj->status >
+			SYNX_STATE_SIGNALED_SUCCESS &&
+			synx_g_obj->status <= SYNX_STATE_SIGNALED_MAX)
+			parent_status = synx_g_obj->status;
+		else if (parent_status == SYNX_STATE_ACTIVE)
+			parent_status = synx_g_obj->status;
+
+		if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child != 0)
+			num_child++;
+
+		dprintk(SYNX_MEM, "synx_obj->status %d parent status %d\n",
+			synx_g_obj->status, parent_status);
 		synx_gmem_unlock(idx, &flags);
 
 		if (i >= SYNX_GLOBAL_MAX_PARENTS) {
@@ -773,13 +771,9 @@ int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
 		goto fail;
 	synx_g_obj = &synx_gmem.table[p_idx];
 	synx_g_obj->num_child += num_child;
-	if (sig_error)
-		synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
-	else if (synx_g_obj->num_child != 0)
+	if (synx_g_obj->num_child != 0)
 		synx_g_obj->refcount++;
-	else if (synx_g_obj->num_child == 0 &&
-		synx_g_obj->status == SYNX_STATE_ACTIVE)
-		synx_g_obj->status = SYNX_STATE_SIGNALED_SUCCESS;
+	synx_g_obj->status = parent_status;
 	synx_global_print_data(synx_g_obj, __func__);
 	synx_gmem_unlock(p_idx, &flags);
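The rewritten loop replaces the sig_error boolean with a parent_status accumulator: parents are now linked regardless of child state, an error code (anything in (SIGNALED_SUCCESS, SIGNALED_MAX]) overrides any earlier value, and otherwise the first terminal status seen seeds the result. The precedence rule, distilled into a sketch:

/* Illustrative fold of one child's status into the merged parent's */
static u32 demo_fold_parent_status(u32 parent_status, u32 child_status)
{
	if (child_status == SYNX_STATE_ACTIVE)
		return parent_status;		/* child pending: no change */
	if (child_status > SYNX_STATE_SIGNALED_SUCCESS &&
	    child_status <= SYNX_STATE_SIGNALED_MAX)
		return child_status;		/* error states take priority */
	if (parent_status == SYNX_STATE_ACTIVE)
		return child_status;		/* first terminal status seeds */
	return parent_status;
}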
 

+ 4 - 0
msm/synx/synx_global.h

@@ -53,6 +53,10 @@ enum synx_core_id {
 #define SYNX_STATE_SIGNALED_ERROR      3
 #define SYNX_STATE_SIGNALED_EXTERNAL   5
 #define SYNX_STATE_SIGNALED_SSR        6
+#define SYNX_STATE_TIMEOUT             7
+
+/* dma fence states */
+#define SYNX_DMA_FENCE_STATE_MAX             4096
 
 /**
  * struct synx_global_coredata - Synx global object, used for book keeping

+ 5 - 1
msm/synx/synx_private.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef __SYNX_PRIVATE_H__
@@ -101,7 +101,10 @@ struct synx_kernel_payload {
 struct synx_cb_data {
 	struct synx_session *session;
 	u32 idx;
+	u32 h_synx;
 	u32 status;
+	struct timer_list synx_timer;
+	u64 timeout;
 	struct work_struct cb_dispatch;
 	struct list_head node;
 };
@@ -149,6 +152,7 @@ struct synx_coredata {
 	struct mutex obj_lock;
 	struct kref refcount;
 	u32 type;
+	u32 status;
 	u32 num_bound_synxs;
 	struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS];
 	struct list_head reg_cbs_list;

+ 21 - 7
msm/synx/synx_util.c

@@ -108,6 +108,7 @@ int synx_util_init_coredata(struct synx_coredata *synx_obj,
 	if (rc != SYNX_SUCCESS)
 		goto clean;
 
+	synx_obj->status = synx_util_get_object_status(synx_obj);
 	return SYNX_SUCCESS;
 
 clean:
@@ -217,6 +218,7 @@ int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
 	kref_init(&synx_obj->refcount);
 	mutex_init(&synx_obj->obj_lock);
 	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
+	synx_obj->status = synx_util_get_object_status(synx_obj);
 
 	synx_util_activate(synx_obj);
 	return rc;
@@ -299,6 +301,12 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj)
 			"dipatching un-released callbacks of session %pK\n",
 			synx_cb->session);
 		synx_cb->status = SYNX_STATE_SIGNALED_CANCEL;
+		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
+			dprintk(SYNX_VERB,
+				"Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
+				synx_cb, synx_cb->timeout);
+			del_timer(&synx_cb->synx_timer);
+		}
 		list_del_init(&synx_cb->node);
 		queue_work(synx_dev->wq_cb,
 			&synx_cb->cb_dispatch);
@@ -731,7 +739,7 @@ static u32 __fence_state(struct dma_fence *fence, bool locked)
 static u32 __fence_group_state(struct dma_fence *fence, bool locked)
 {
 	u32 i = 0;
-	u32 state = SYNX_STATE_INVALID;
+	u32 state = SYNX_STATE_INVALID, parent_state = SYNX_STATE_INVALID;
 	struct dma_fence_array *array = NULL;
 	u32 intr, actv_cnt, sig_cnt, err_cnt;
 
@@ -747,6 +755,8 @@ static u32 __fence_group_state(struct dma_fence *fence, bool locked)
 
 	for (i = 0; i < array->num_fences; i++) {
 		intr = __fence_state(array->fences[i], locked);
+		if (err_cnt == 0)
+			parent_state = intr;
 		switch (intr) {
 		case SYNX_STATE_ACTIVE:
 			actv_cnt++;
@@ -755,7 +765,7 @@ static u32 __fence_group_state(struct dma_fence *fence, bool locked)
 			sig_cnt++;
 			break;
 		default:
-			err_cnt++;
+			intr > SYNX_STATE_SIGNALED_MAX ? sig_cnt++ : err_cnt++;
 		}
 	}
 
@@ -763,12 +773,10 @@ static u32 __fence_group_state(struct dma_fence *fence, bool locked)
 		"group cnt stats act:%u, sig: %u, err: %u\n",
 		actv_cnt, sig_cnt, err_cnt);
 
-	if (err_cnt)
-		state = SYNX_STATE_SIGNALED_ERROR;
-	else if (actv_cnt)
+	if (actv_cnt)
 		state = SYNX_STATE_ACTIVE;
-	else if (sig_cnt == array->num_fences)
-		state = SYNX_STATE_SIGNALED_SUCCESS;
+	else
+		state = parent_state;
 
 	return state;
 }
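Two behavioral shifts above: an error among the children no longer short-circuits the whole array (any ACTIVE member keeps the group ACTIVE), and custom codes above SYNX_STATE_SIGNALED_MAX now count as signaled rather than as errors. Since parent_state stops updating once err_cnt is non-zero, it ends as the first error if one occurs, or the last member's state otherwise. Distilled:

/* Illustrative distillation of __fence_group_state() above */
static u32 demo_group_state(const u32 *child, u32 n)
{
	u32 i, actv = 0, err = 0, parent_state = SYNX_STATE_INVALID;

	for (i = 0; i < n; i++) {
		if (err == 0)
			parent_state = child[i];	/* latches at first error */
		if (child[i] == SYNX_STATE_ACTIVE)
			actv++;
		else if (child[i] != SYNX_STATE_SIGNALED_SUCCESS &&
			 child[i] <= SYNX_STATE_SIGNALED_MAX)
			err++;				/* true error range */
	}
	return actv ? SYNX_STATE_ACTIVE : parent_state;
}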
@@ -1173,6 +1181,12 @@ void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
 	list_for_each_entry_safe(synx_cb,
 		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
 		synx_cb->status = status;
+		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
+			dprintk(SYNX_VERB,
+				"Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
+				synx_cb, synx_cb->timeout);
+			del_timer(&synx_cb->synx_timer);
+		}
 		list_del_init(&synx_cb->node);
 		queue_work(synx_dev->wq_cb,
 			&synx_cb->cb_dispatch);
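A closing editorial caution on the timer deletions sprinkled through these paths: del_timer() returns without waiting for a handler that is already running on another CPU, so these call sites lean on the list and lock discipline to avoid racing synx_timer_cb(). Outside the handler itself, del_timer_sync() is the conventional belt-and-braces form; a one-line sketch of that variant:

/* If the handler may be mid-flight on another CPU, the usual idiom is: */
if (synx_cb->timeout != SYNX_NO_TIMEOUT)
	del_timer_sync(&synx_cb->synx_timer);	/* waits for a running handler */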