Merge bfffddb83e58b5752e16bcb5d1cb704f59983bad on remote branch

Change-Id: I470b97d6373285b89d9c8549367cb824f757f7c7
Linux Build Service Account, 1 year ago
commit 59c4b43a36
5 changed files with 186 additions and 195 deletions:
  1. msm/synx/ipclite.c         (+45, -91)
  2. msm/synx/ipclite.h         (+7, -12)
  3. msm/synx/ipclite_client.h  (+20, -6)
  4. msm/synx/synx_api.h        (+114, -78)
  5. msm/synx/synx_global.h     (+0, -8)

msm/synx/ipclite.c  (+45, -91)

@@ -17,7 +17,6 @@
 #include <linux/sizes.h>
 
 #include <linux/hwspinlock.h>
-#include <linux/qcom_scm.h>
 
 #include <linux/sysfs.h>
 
@@ -32,15 +31,13 @@
 static struct ipclite_info *ipclite;
 static struct ipclite_client synx_client;
 static struct ipclite_client test_client;
-static struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
 static struct ipclite_debug_info *ipclite_dbg_info;
 static struct ipclite_debug_struct *ipclite_dbg_struct;
 static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem;
 static struct mutex ssr_mutex;
 static struct kobject *sysfs_kobj;
 
-static uint32_t enabled_hosts;
-static uint32_t partitions;
+static uint32_t enabled_hosts, partitions;
 static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
 static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO;
 static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump;
@@ -55,7 +52,7 @@ static inline bool is_loopback_except_apps(uint32_t h0, uint32_t h1)
 	return (h0 == h1 && h0 != IPCMEM_APPS);
 }
 
-static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...)
+static void ipclite_inmem_log(const char *psztStr, ...)
 {
 	uint32_t local_index = 0;
 	va_list pArgs;
@@ -183,38 +180,41 @@ static void ipclite_dump_inmem_logs(void)
 	return;
 }
 
-static void ipclite_hw_mutex_acquire(void)
+int ipclite_hw_mutex_acquire(void)
 {
-	int32_t ret;
-
-	if (ipclite != NULL) {
-		if (!global_atomic_support) {
-			ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
-					HWSPINLOCK_TIMEOUT,
-					&ipclite->ipclite_hw_mutex->flags);
-			if (ret) {
-				IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed\n");
-				return;
-			}
-
-			ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS;
+	int ret;
 
-			IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n");
-		}
+	if (unlikely(!ipclite)) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized");
+		return -ENOMEM;
 	}
+	ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
+					HWSPINLOCK_TIMEOUT, &ipclite->hw_mutex_flags);
+	if (ret) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed");
+		return ret;
+	}
+	ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS;
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired");
+	return ret;
 }
+EXPORT_SYMBOL(ipclite_hw_mutex_acquire);
 
-static void ipclite_hw_mutex_release(void)
+int ipclite_hw_mutex_release(void)
 {
-	if (ipclite != NULL) {
-		if (!global_atomic_support) {
-			ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
-			hwspin_unlock_irqrestore(ipclite->hwlock,
-				&ipclite->ipclite_hw_mutex->flags);
-			IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n");
-		}
+	if (unlikely(!ipclite)) {
+		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized");
+		return -ENOMEM;
 	}
+	if (ipclite->ipcmem.toc_data.host_info->hwlock_owner != IPCMEM_APPS)
+		return -EINVAL;
+
+	ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
+	hwspin_unlock_irqrestore(ipclite->hwlock, &ipclite->hw_mutex_flags);
+	IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock released");
+	return 0;
 }
+EXPORT_SYMBOL(ipclite_hw_mutex_release);
 
 void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
 {
@@ -230,25 +230,17 @@ EXPORT_SYMBOL(ipclite_atomic_init_i32);
 
 void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
 {
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	atomic_set(addr, data);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 }
 EXPORT_SYMBOL(ipclite_global_atomic_store_u32);
 
 void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data)
 {
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	atomic_set(addr, data);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 }
 EXPORT_SYMBOL(ipclite_global_atomic_store_i32);
 
@@ -256,13 +248,9 @@ uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr)
 {
 	uint32_t ret;
 
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	ret = atomic_read(addr);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 
 	return ret;
 }
@@ -272,13 +260,9 @@ int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr)
 {
 	int32_t ret;
 
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	ret = atomic_read(addr);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 
 	return ret;
 }
@@ -289,13 +273,9 @@ uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *a
 	uint32_t ret;
 	uint32_t mask = (1 << nr);
 
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	ret = atomic_fetch_or(mask, addr);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 
 	return ret;
 }
@@ -306,13 +286,9 @@ uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t
 	uint32_t ret;
 	uint32_t mask = (1 << nr);
 
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	ret = atomic_fetch_and(~mask, addr);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 
 	return ret;
 }
@@ -322,13 +298,9 @@ int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr)
 {
 	int32_t ret = 0;
 
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	ret = atomic_fetch_add(1, addr);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 
 	return ret;
 }
@@ -338,13 +310,9 @@ int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr)
 {
 	int32_t ret = 0;
 
-	/* callback to acquire hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->acquire();
-
+	ATOMIC_HW_MUTEX_ACQUIRE;
 	ret = atomic_fetch_sub(1, addr);
-
-	/* callback to release hw mutex lock if atomic support is not enabled */
-	ipclite->ipclite_hw_mutex->release();
+	ATOMIC_HW_MUTEX_RELEASE;
 
 	return ret;
 }
@@ -1158,7 +1126,6 @@ int32_t get_global_partition_info(struct global_region_info *global_ipcmem)
 {
 	struct ipcmem_global_partition *global_partition;
 
-	/* Check added to verify ipclite is initialized */
 	if (!ipclite) {
 		IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n");
 		return -ENOMEM;
@@ -1250,12 +1217,6 @@ static int ipclite_channel_init(struct device *parent,
 	}
 	IPCLITE_OS_LOG(IPCLITE_DBG, "remote_pid = %d, local_pid=%d\n", remote_pid, local_pid);
 
-	ipclite_hw_mutex = devm_kzalloc(dev, sizeof(*ipclite_hw_mutex), GFP_KERNEL);
-	if (!ipclite_hw_mutex) {
-		ret = -ENOMEM;
-		goto err_put_dev;
-	}
-
 	ret = of_property_read_u32(dev->of_node, "global_atomic", &global_atomic);
 	if (ret) {
 		dev_err(dev, "failed to parse global_atomic\n");
@@ -1627,13 +1588,6 @@ static int ipclite_probe(struct platform_device *pdev)
 	/* Should be called after all Global TOC related init is done */
 	insert_magic_number();
 
-	/* hw mutex callbacks */
-	ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire;
-	ipclite_hw_mutex->release = ipclite_hw_mutex_release;
-
-	/* store to ipclite structure */
-	ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
-
 	/* Update the Global Debug variable for FW cores */
 	ipclite_dbg_info->debug_level = ipclite_debug_level;
 	ipclite_dbg_info->debug_control = ipclite_debug_control;

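The ATOMIC_HW_MUTEX_ACQUIRE/RELEASE macros used above are introduced in ipclite.h below and skip the hardware mutex entirely when global atomics are supported. As a rough sketch of the resulting behaviour (illustrative only, not the committed code; the helper name is made up), each global atomic wrapper now boils down to:

static int32_t example_global_atomic_inc(ipclite_atomic_int32_t *addr)
{
	int32_t ret;

	if (!global_atomic_support)		/* ATOMIC_HW_MUTEX_ACQUIRE */
		ipclite_hw_mutex_acquire();

	ret = atomic_fetch_add(1, addr);	/* operation on shared memory */

	if (!global_atomic_support)		/* ATOMIC_HW_MUTEX_RELEASE */
		ipclite_hw_mutex_release();

	return ret;
}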
msm/synx/ipclite.h  (+7, -12)

@@ -54,17 +54,18 @@
 #define IPCLITE_OS_LOG(__level, __fmt, arg...) \
 	do { \
 		if (ipclite_debug_level & __level) { \
-			if (ipclite_debug_control & IPCLITE_DMESG_LOG) { \
+			if (ipclite_debug_control & IPCLITE_DMESG_LOG) \
 				pr_info(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \
 							ipclite_dbg_label[__level], ## arg); \
-			} \
-			if (ipclite_debug_control & IPCLITE_INMEM_LOG) { \
-				IPCLITE_OS_INMEM_LOG(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \
+			if (ipclite_debug_control & IPCLITE_INMEM_LOG) \
+				ipclite_inmem_log(IPCLITE_CORE_DBG_LABEL "%s:"__fmt, \
 							ipclite_dbg_label[__level], ## arg); \
-			} \
 		} \
 	} while (0)
 
+#define ATOMIC_HW_MUTEX_ACQUIRE (global_atomic_support ?: ipclite_hw_mutex_acquire())
+#define ATOMIC_HW_MUTEX_RELEASE (global_atomic_support ?: ipclite_hw_mutex_release())
+
 /**
  * enum ipclite_channel_status - channel status
  *
@@ -302,12 +303,6 @@ struct ipclite_fifo {
 	void (*reset)(struct ipclite_fifo *fifo);
 };
 
-struct ipclite_hw_mutex_ops {
-	unsigned long flags;
-	void (*acquire)(void);
-	void (*release)(void);
-};
-
 struct ipclite_irq_info {
 	struct mbox_client mbox_client;
 	struct mbox_chan *mbox_chan;
@@ -346,7 +341,7 @@ struct ipclite_info {
 	struct ipclite_channel channel[IPCMEM_NUM_HOSTS];
 	struct ipclite_mem ipcmem;
 	struct hwspinlock *hwlock;
-	struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
+	unsigned long hw_mutex_flags;
 };
 
 /*Default partition parameters*/

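Note that the new ATOMIC_HW_MUTEX_* macros rely on the GNU "?:" extension: when global_atomic_support is non-zero the hardware mutex call is skipped altogether, and on the fallback path the helper's return code is evaluated but not propagated. Roughly (an illustration, not the committed text):

/* ATOMIC_HW_MUTEX_ACQUIRE behaves like: */
(global_atomic_support ? global_atomic_support
		       : ipclite_hw_mutex_acquire());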
msm/synx/ipclite_client.h  (+20, -6)

@@ -32,7 +32,7 @@ struct global_region_info {
 	uint32_t size;
 };
 
-typedef int32_t (*IPCLite_Client)(uint32_t proc_id,  int64_t data,  void *priv);
+typedef int (*IPCLite_Client)(uint32_t proc_id,  int64_t data,  void *priv);
 
 /**
  * ipclite_msg_send() - Sends message to remote client.
@@ -42,7 +42,7 @@ typedef int32_t (*IPCLite_Client)(uint32_t proc_id,  int64_t data,  void *priv);
  *
  * @return Zero on successful registration, negative on failure.
  */
-int32_t ipclite_msg_send(int32_t proc_id, uint64_t data);
+int ipclite_msg_send(int32_t proc_id, uint64_t data);
 
 /**
  * ipclite_register_client() - Registers client callback with framework.
@@ -52,7 +52,7 @@ int32_t ipclite_msg_send(int32_t proc_id, uint64_t data);
  *
  * @return Zero on successful registration, negative on failure.
  */
-int32_t ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv);
+int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv);
 
 /**
  * ipclite_test_msg_send() - Sends message to remote client.
@@ -62,7 +62,7 @@ int32_t ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv);
  *
  * @return Zero on successful registration, negative on failure.
  */
-int32_t ipclite_test_msg_send(int32_t proc_id, uint64_t data);
+int ipclite_test_msg_send(int32_t proc_id, uint64_t data);
 
 /**
  * ipclite_register_test_client() - Registers client callback with framework.
@@ -72,7 +72,7 @@ int32_t ipclite_test_msg_send(int32_t proc_id, uint64_t data);
  *
  * @return Zero on successful registration, negative on failure.
  */
-int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
+int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
 
 /**
  * get_global_partition_info() - Gets info about IPCMEM's global partitions.
@@ -81,7 +81,7 @@ int32_t ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv);
  *
  * @return Zero on successful registration, negative on failure.
  */
-int32_t get_global_partition_info(struct global_region_info *global_ipcmem);
+int get_global_partition_info(struct global_region_info *global_ipcmem);
 
 /**
  * ipclite_recover() - Recovers the ipclite if any core goes for SSR
@@ -92,6 +92,20 @@ int32_t get_global_partition_info(struct global_region_info *global_ipcmem);
  */
 void ipclite_recover(enum ipcmem_host_type core_id);
 
+/**
+ * ipclite_hw_mutex_acquire() - Locks the hw mutex reserved for ipclite.
+ *
+ * @return Zero on successful acquire, negative on failure.
+ */
+int ipclite_hw_mutex_acquire(void);
+
+/**
+ * ipclite_hw_mutex_release() - Unlocks the hw mutex reserved for ipclite.
+ *
+ * @return Zero on successful release, negative on failure.
+ */
+int ipclite_hw_mutex_release(void);
+
 /**
  * ipclite_atomic_init_u32() - Initializes the global memory with uint32_t value.
  *

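With ipclite_hw_mutex_acquire()/ipclite_hw_mutex_release() now exported through this header, an APPS-side client can bracket updates to the shared global partition with the hardware lock. A minimal sketch against the documented API (error handling abbreviated, partition layout left to the client):

	struct global_region_info ginfo;
	int ret;

	ret = get_global_partition_info(&ginfo);
	if (ret)
		return ret;			/* IPCLite not initialized */

	ret = ipclite_hw_mutex_acquire();	/* lock shared with FW cores */
	if (ret)
		return ret;

	/* ... update data in the global partition described by ginfo ... */

	ipclite_hw_mutex_release();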
msm/synx/synx_api.h  (+114, -78)

@@ -20,17 +20,27 @@
  */
 #define SYNX_INVALID_HANDLE 0
 
+/* synx object states */
+#define SYNX_STATE_INVALID             0    // Invalid synx object
+#define SYNX_STATE_ACTIVE              1    // Synx object has not been signaled
+#define SYNX_STATE_SIGNALED_ERROR      3    // Synx object signaled with error
+#define SYNX_STATE_SIGNALED_EXTERNAL   5    // Synx object was signaled by external dma client.
+#define SYNX_STATE_SIGNALED_SSR        6    // Synx object signaled with SSR
+#define SYNX_STATE_TIMEOUT             7    // Callback status for synx object in case of timeout
+
 /**
- * enum synx_create_flags - Flags passed during synx_create call
+ * enum synx_create_flags - Flags passed during synx_create call.
  *
- * SYNX_CREATE_LOCAL_FENCE  : Instructs the framework to create local synx object
+ * SYNX_CREATE_LOCAL_FENCE  : Instructs the framework to create local synx object,
+ *                            for local synchronization i.e. within same core.
  * SYNX_CREATE_GLOBAL_FENCE : Instructs the framework to create global synx object
+ *                            for global synchronization i.e. across supported core.
  * SYNX_CREATE_DMA_FENCE    : Create a synx object by wrapping the provided dma fence.
  *                            Need to pass the dma_fence ptr through fence variable
- *                            if this flag is set.
+ *                            if this flag is set. (NOT SUPPORTED)
  * SYNX_CREATE_CSL_FENCE    : Create a synx object with provided csl fence.
  *                            Establishes interop with the csl fence through
- *                            bind operations.
+ *                            bind operations. (NOT SUPPORTED)
  */
 enum synx_create_flags {
 	SYNX_CREATE_LOCAL_FENCE  = 0x01,
@@ -42,24 +52,41 @@ enum synx_create_flags {
 
 /**
  * enum synx_init_flags - Session initialization flag
+ * SYNX_INIT_DEFAULT   : Initialization flag to be passed
+ *                       when initializing session
+ * SYNX_INIT_MAX       : Used for internal checks
  */
 enum synx_init_flags {
-	SYNX_INIT_MAX = 0x01,
+	SYNX_INIT_DEFAULT = 0x00,
+	SYNX_INIT_MAX     = 0x01,
 };
 
 /**
  * enum synx_import_flags - Import flags
  *
- * SYNX_IMPORT_LOCAL_FENCE  : Instructs the framework to create local synx object
- * SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object
- * SYNX_IMPORT_SYNX_FENCE   : Import native Synx handle for synchronization
+ * SYNX_IMPORT_LOCAL_FENCE  : Instructs the framework to create local synx object,
+ *                            for local synchronization i.e. within same core.
+ * SYNX_IMPORT_GLOBAL_FENCE : Instructs the framework to create global synx object,
+ *                            for global synchronization i.e. across supported core.
+ * SYNX_IMPORT_SYNX_FENCE   : Import native Synx handle for synchronization.
  *                            Need to pass the Synx handle ptr through fence variable
- *                            if this flag is set.
- * SYNX_IMPORT_DMA_FENCE    : Import dma fence.and crate Synx handle for interop
+ *                            if this flag is set. Client must pass:
+ *                            a. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_LOCAL_FENCE
+ *                               to import a synx handle as local synx handle.
+ *                            b. SYNX_IMPORT_SYNX_FENCE|SYNX_IMPORT_GLOBAL_FENCE
+ *                               to import a synx handle as global synx handle.
+ * SYNX_IMPORT_DMA_FENCE    : Import dma fence and create Synx handle for interop.
  *                            Need to pass the dma_fence ptr through fence variable
- *                            if this flag is set.
+ *                            if this flag is set. Client must pass:
+ *                            a. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_LOCAL_FENCE
+ *                               to import a dma fence and create local synx handle
+ *                               for interop.
+ *                            b. SYNX_IMPORT_DMA_FENCE|SYNX_IMPORT_GLOBAL_FENCE
+ *                               to import a dma fence and create global synx handle
+ *                               for interop.
  * SYNX_IMPORT_EX_RELEASE   : Flag to inform relaxed invocation where release call
  *                            need not be called by client on this handle after import.
+ *                            (NOT SUPPORTED)
  */
 enum synx_import_flags {
 	SYNX_IMPORT_LOCAL_FENCE  = 0x01,
@@ -95,7 +122,7 @@ typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
  * synx_user_callback - Callback function registered by clients
  *
  * User callback registered for non-blocking wait. Dispatched when
- * synx object is signaled or timeout has expired.
+ * synx object is signaled or timed-out with status of synx object.
  */
 typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data);
 
@@ -119,9 +146,10 @@ struct bind_operations {
 };
 
 /**
- * synx_bind_client_type : External fence supported for bind
+ * synx_bind_client_type : External fence supported for bind (NOT SUPPORTED)
  *
  * SYNX_TYPE_CSL : Camera CSL fence
+ * SYNX_MAX_BIND_TYPES : Used for internal checks
  */
 enum synx_bind_client_type {
 	SYNX_TYPE_CSL = 0,
@@ -129,7 +157,7 @@ enum synx_bind_client_type {
 };
 
 /**
- * struct synx_register_params - External registration parameters
+ * struct synx_register_params - External registration parameters  (NOT SUPPORTED)
  *
  * @ops  : Bind operations struct
  * @name : External client name
@@ -144,8 +172,10 @@ struct synx_register_params {
 
 /**
  * struct synx_queue_desc - Memory descriptor of the queue allocated by
- *                           the fence driver for each client during
- *                           register.
+ *                          the fence driver for each client during
+ *                          register. (Clients need not pass any pointer
+ *                          in synx_initialize_params. It is for future
+ *                          use).
  *
  * @vaddr    : CPU virtual address of the queue.
  * @dev_addr : Physical address of the memory object.
@@ -196,8 +226,10 @@ enum synx_client_id {
 /**
  * struct synx_session - Client session identifier
  *
- * @type   : Session type
+ * @type   : Session type.
+ *           Internal Member. (Do not access/modify)
  * @client : Pointer to client session
+ *           Internal Member. (Do not access/modify)
  */
 struct synx_session {
 	u32 type;
@@ -209,7 +241,8 @@ struct synx_session {
  *
  * @name  : Client session name
  *          Only first 64 bytes are accepted, rest will be ignored
- * @ptr   : Pointer to queue descriptor (filled by function)
+ * @ptr   : Memory descriptor of queue allocated by fence during
+ *          device register. (filled by function)
  * @id    : Client identifier
  * @flags : Synx initialization flags
  */
@@ -228,16 +261,8 @@ struct synx_initialization_params {
  *             Only first 64 bytes are accepted,
  *             rest will be ignored
  * @h_synx   : Pointer to synx object handle (filled by function)
- * @fence    : Pointer to external fence
- * @flags    : Synx flags for customization (mentioned below)
- *
- * SYNX_CREATE_GLOBAL_FENCE - Hints the framework to create global synx object
- *     If flag not set, hints framework to create a local synx object.
- * SYNX_CREATE_DMA_FENCE - Wrap synx object with dma fence.
- *     Need to pass the dma_fence ptr through 'fence' variable if this flag is set.
- * SYNX_CREATE_BIND_FENCE - Create a synx object with provided external fence.
- *     Establishes interop with supported external fence through bind operations.
- *     Need to fill synx_external_desc structure if this flag is set.
+ * @fence    : Pointer to external dma fence or csl fence. (NOT SUPPORTED)
+ * @flags    : Synx flags for customization
  */
 
 struct synx_create_params {
@@ -250,10 +275,19 @@ struct synx_create_params {
 /**
  * enum synx_merge_flags - Handle merge flags
  *
- * SYNX_MERGE_LOCAL_FENCE   : Create local composite object.
- * SYNX_MERGE_GLOBAL_FENCE  : Create global composite object.
- * SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects
- * SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object
+ * SYNX_MERGE_LOCAL_FENCE   : Create local composite synx object. To be passed along
+ *                            with SYNX_MERGE_NOTIFY_ON_ALL.
+ * SYNX_MERGE_GLOBAL_FENCE  : Create global composite synx object. To be passed along
+ *                            with SYNX_MERGE_NOTIFY_ON_ALL.
+ * SYNX_MERGE_NOTIFY_ON_ALL : Notify on signaling of ALL objects.
+ *                            Clients must pass:
+ *                            a. SYNX_MERGE_LOCAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL
+ *                               to create local composite synx object and notify
+ *                               it when all child synx objects are signaled.
+ *                            b. SYNX_MERGE_GLOBAL_FENCE|SYNX_MERGE_NOTIFY_ON_ALL
+ *                               to create global composite synx object and notify
+ *                               it when all child synx objects are signaled.
+ * SYNX_MERGE_NOTIFY_ON_ANY : Notify on signaling of ANY object. (NOT SUPPORTED)
  */
 enum synx_merge_flags {
 	SYNX_MERGE_LOCAL_FENCE   = 0x01,
@@ -267,8 +301,8 @@ enum synx_merge_flags {
  *
  * @h_synxs      : Pointer to a array of synx handles to be merged
  * @flags        : Merge flags
- * @num_objs     : Number of synx objs in the block
- * @h_merged_obj : Merged synx object handle (filled by function)
+ * @num_objs     : Number of synx handles to be merged (in array h_synxs).
+ * @h_merged_obj : Merged synx handle (filled by function)
  */
 struct synx_merge_params {
 	u32 *h_synxs;
@@ -296,8 +330,8 @@ enum synx_import_type {
  *                The new handle/s should be used by importing
  *                process for all synx api operations and
  *                for sharing with FW cores.
- * @flags       : Synx flags
- * @fence       : Pointer to external fence
+ * @flags       : Synx import flags
+ * @fence       : Pointer to DMA fence fd or synx handle.
  */
 struct synx_import_indv_params {
 	u32 *new_h_synx;
@@ -308,8 +342,8 @@ struct synx_import_indv_params {
 /**
  * struct synx_import_arr_params - Synx import arr parameters
  *
- * @list        : Array of synx_import_indv_params pointers
- * @num_fences  : No of fences passed to framework
+ * @list        : List of synx_import_indv_params
+ * @num_fences  : Number of fences or synx handles to be imported
  */
 struct synx_import_arr_params {
 	struct synx_import_indv_params *list;
@@ -320,8 +354,8 @@ struct synx_import_arr_params {
  * struct synx_import_params - Synx import parameters
  *
  * @type : Import params type filled by client
- * @indv : Params to import an individual handle/fence
- * @arr  : Params to import an array of handles/fences
+ * @indv : Params to import an individual handle or fence
+ * @arr  : Params to import an array of handles or fences
  */
 struct synx_import_params {
 	enum synx_import_type type;
@@ -335,9 +369,9 @@ struct synx_import_params {
  * struct synx_callback_params - Synx callback parameters
  *
  * @h_synx         : Synx object handle
- * @cb_func        : Pointer to callback func to be invoked
- * @userdata       : Opaque pointer passed back with callback
- * @cancel_cb_func : Pointer to callback to ack cancellation (optional)
+ * @cb_func        : Pointer to callback func to be invoked.
+ * @userdata       : Opaque pointer passed back with callback as data
+ * @cancel_cb_func : Pointer to callback to ack cancellation
  * @timeout_ms     : Timeout in ms. SYNX_NO_TIMEOUT if no timeout.
  */
 struct synx_callback_params {
@@ -350,7 +384,7 @@ struct synx_callback_params {
 
 /* Kernel APIs */
 
-/* synx_register_ops - Register operations for external synchronization
+/* synx_register_ops - Register operations for external synchronization  (NOT SUPPORTED)
  *
  * Register with synx for enabling external synchronization through bind
  *
@@ -365,7 +399,7 @@ struct synx_callback_params {
 int synx_register_ops(const struct synx_register_params *params);
 
 /**
- * synx_deregister_ops - De-register external synchronization operations
+ * synx_deregister_ops - De-register external synchronization operations  (NOT SUPPORTED)
  *
  * @param params : Pointer to register params
  *
@@ -388,47 +422,48 @@ struct synx_session *synx_initialize(struct synx_initialization_params *params);
  *
  * @param session : Session ptr (returned from synx_initialize)
  *
- * @return Status of operation. SYNX_SUCCESS in case of success.
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
  */
 int synx_uninitialize(struct synx_session *session);
 
 /**
  * synx_create - Creates a synx object
  *
- *  Creates a new synx obj and returns the handle to client.
+ * Creates a new synx obj and returns the handle to client. There can be
+ * maximum of 4095 global synx handles or local synx handles across
+ * sessions.
  *
  * @param session : Session ptr (returned from synx_initialize)
  * @param params  : Pointer to create params
  *
- * @return Status of operation. SYNX_SUCCESS in case of success.
- * -SYNX_INVALID will be returned if params were invalid.
- * -SYNX_NOMEM will be returned if the kernel can't allocate space for
- * synx object.
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
  */
 int synx_create(struct synx_session *session, struct synx_create_params *params);
 
 /**
  * synx_async_wait - Registers a callback with a synx object
  *
+ * Clients can register maximum of 64 callbacks functions per
+ * synx session. Clients should register callback functions with minimal computation.
+ *
  * @param session : Session ptr (returned from synx_initialize)
- * @param params  : Callback params
+ * @param params  : Callback params.
+ *                  cancel_cb_func in callback params is optional with this API.
  *
- * @return Status of operation. SYNX_SUCCESS in case of success.
- * -SYNX_INVALID will be returned if userdata is invalid.
- * -SYNX_NOMEM will be returned if cb_func is invalid.
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
  */
 int synx_async_wait(struct synx_session *session, struct synx_callback_params *params);
 
 /**
  * synx_cancel_async_wait - De-registers a callback with a synx object
  *
+ * This API will cancel one instance of callback function (mapped
+ * with userdata and h_synx) provided in cb_func of callback params.
+ *
  * @param session : Session ptr (returned from synx_initialize)
  * @param params  : Callback params
  *
- * @return Status of operation. SYNX_SUCCESS in case of success.
- * -SYNX_ALREADY if object has already been signaled, and cannot be cancelled.
- * -SYNX_INVALID will be returned if userdata is invalid.
- * -SYNX_NOMEM will be returned if cb_func is invalid.
+ * @return Status of operation. Negative in case of error, SYNX_SUCCESS otherwise.
  */
 int synx_cancel_async_wait(struct synx_session *session,
 	struct synx_callback_params *params);
@@ -470,59 +505,57 @@ int synx_merge(struct synx_session *session, struct synx_merge_params *params);
  * Does a wait on the synx object identified by h_synx for a maximum
  * of timeout_ms milliseconds. Must not be called from interrupt context as
  * this API can sleep.
- * Will return status if handle was signaled. Status can be from pre-defined
- * states (enum synx_signal_status) or custom status sent by producer.
  *
  * @param session    : Session ptr (returned from synx_initialize)
  * @param h_synx     : Synx object handle to be waited upon
  * @param timeout_ms : Timeout in ms
  *
- * @return Signal status. -SYNX_INVAL if synx object is in bad state or arguments
- * are invalid, -SYNX_TIMEOUT if wait times out.
+ * @return Status of synx object if handle is signaled. -SYNX_INVAL if synx object
+ * is in bad state or arguments are invalid, -SYNX_TIMEOUT if wait times out.
  */
 int synx_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms);
 
 /**
- * synx_get_status - Returns the status of the synx object
+ * synx_get_status - Returns the status of the synx object.
+ *
+ * This API should not be used in polling mode to know if the handle
+ * is signaled or not.
+ * Clients need to explicitly wait using synx_wait() or synx_async_wait()
  *
  * @param session : Session ptr (returned from synx_initialize)
  * @param h_synx  : Synx object handle
  *
- * @return Status of the synx object.
+ * @return Status of the synx object
  */
 int synx_get_status(struct synx_session *session, u32 h_synx);
 
 /**
- * synx_import - Imports (looks up) synx object from given handle/fence
- *
- * Import subscribes the client session for notification on signal
- * of handles/fences.
- *
+ * synx_import - Imports (looks up) synx object from given handle or fence
+ *
  * @param session : Session ptr (returned from synx_initialize)
  * @param params  : Pointer to import params
  *
- * @return SYNX_SUCCESS upon success, -SYNX_INVAL if synx object is bad state
+ * @return Status of operation. Negative in case of failure, SYNX_SUCCESS otherwise.
  */
 int synx_import(struct synx_session *session, struct synx_import_params *params);
 
 /**
  * synx_get_fence - Get the native fence backing the synx object
  *
- * Function returns the native fence. Clients need to
- * acquire & release additional reference explicitly.
+ * Synx framework will take additional reference on dma fence and returns the native
+ * fence. Clients need to release additional reference explicitly by calling kref_put.
  *
  * @param session : Session ptr (returned from synx_initialize)
  * @param h_synx  : Synx object handle
  *
- * @return Fence pointer upon success, NULL or error in case of failure.
+ * @return Fence pointer in case of success and NULL in case of failure.
  */
 void *synx_get_fence(struct synx_session *session, u32 h_synx);
 
 /**
- * synx_release - Release the synx object
+ * synx_release - Release the synx object.
  *
- * Decrements refcount of a synx object by 1, and destroys it
- * if becomes 0.
+ * Every created, imported or merged synx object should be released.
  *
  * @param session : Session ptr (returned from synx_initialize)
  * @param h_synx  : Synx object handle to be destroyed
@@ -536,7 +569,7 @@ int synx_release(struct synx_session *session, u32 h_synx);
  *
  * Function should be called on HW hang/reset to
  * recover the Synx handles shared. This cleans up
- * Synx handles held by the rest HW, and avoids
+ * synx handles owned by subsystem under hang/reset, and avoids
  * potential resource leaks.
  *
  * Function does not destroy the session, but only
@@ -545,6 +578,9 @@ int synx_release(struct synx_session *session, u32 h_synx);
  * need to destroy the session explicitly through
  * synx_uninitialize API.
  *
+ * All the unsignaled handles owned/imported by the core at the time of reset
+ * will be signaled by synx framework on behalf of hung core with SYNX_STATE_SIGNALED_SSR.
+ *
  * @param id : Client ID of core to recover
  *
  * @return Status of operation. Negative in case of error. SYNX_SUCCESS otherwise.

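Taken together, the expanded synx_api.h documentation implies a client flow along the lines of the sketch below. This is illustrative only: the field names follow the documented params members, SYNX_CLIENT_NATIVE is a placeholder client id, and error handling is abbreviated.

	struct synx_initialization_params init_params = {
		.name  = "example-client",
		.id    = SYNX_CLIENT_NATIVE,		/* placeholder id */
		.flags = SYNX_INIT_DEFAULT,
	};
	struct synx_session *session = synx_initialize(&init_params);

	if (!IS_ERR_OR_NULL(session)) {			/* assumed error convention */
		u32 h_synx = SYNX_INVALID_HANDLE;
		struct synx_create_params create_params = {
			.name   = "example-fence",
			.h_synx = &h_synx,		/* filled by synx_create() */
			.flags  = SYNX_CREATE_GLOBAL_FENCE,
		};

		if (synx_create(session, &create_params) == SYNX_SUCCESS) {
			/* Blocking wait; on signal the return value is the object status. */
			int status = synx_wait(session, h_synx, 100);

			if (status == SYNX_STATE_SIGNALED_SSR)
				pr_debug("handle signaled during SSR recovery\n");

			synx_release(session, h_synx);	/* created handles must be released */
		}
		synx_uninitialize(session);
	}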
msm/synx/synx_global.h  (+0, -8)

@@ -47,14 +47,6 @@ enum synx_core_id {
 #define SYNX_HWSPIN_TIMEOUT            500
 #define SYNX_HWSPIN_ID                 10
 
-/* internal signal states */
-#define SYNX_STATE_INVALID             0
-#define SYNX_STATE_ACTIVE              1
-#define SYNX_STATE_SIGNALED_ERROR      3
-#define SYNX_STATE_SIGNALED_EXTERNAL   5
-#define SYNX_STATE_SIGNALED_SSR        6
-#define SYNX_STATE_TIMEOUT             7
-
 /* dma fence states */
 #define SYNX_DMA_FENCE_STATE_MAX             4096
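With the signal states now published in synx_api.h instead of this internal header, a client can compare the value returned by synx_get_status() (or by a signaled synx_wait()) against them directly. A brief illustration, reusing session/h_synx from the sketch above:

	int status = synx_get_status(session, h_synx);

	switch (status) {
	case SYNX_STATE_ACTIVE:
		/* not signaled yet: use synx_wait()/synx_async_wait(), not polling */
		break;
	case SYNX_STATE_SIGNALED_ERROR:
		/* producer signaled the object with an error */
		break;
	case SYNX_STATE_SIGNALED_SSR:
		/* signaled by the framework on behalf of a core that went through SSR */
		break;
	default:
		break;
	}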