synx: Propagating changes from msm-5.10

Includes param change for async_wait(timeout) and other fixes

Change-Id: If8ff795538bbfaf53ee1758561fbd2841e5a71c7
Signed-off-by: Pravin Kumar Ravi <[email protected]>
commit 33788f7297
5 changed files with 122 additions and 8 deletions
  1. msm/synx/synx.c (+22 -2)
  2. msm/synx/synx_api.h (+5 -1)
  3. msm/synx/synx_global.c (+53 -2)
  4. msm/synx/synx_global.h (+10 -0)
  5. msm/synx/synx_util.c (+32 -3)

+ 22 - 2
msm/synx/synx.c

@@ -519,6 +519,13 @@ void synx_signal_handler(struct work_struct *cb_dispatch)
 			dprintk(SYNX_ERR,
 				"global status update of %u failed=%d\n",
 				h_synx, rc);
+		/*
+		 * We decrement the reference here on the assumption that this
+		 * code runs after the handle has been released. If a client
+		 * were to signal the dma fence in the middle of the execution
+		 * sequence, this put would drop the last reference and delete
+		 * the global idx. As of now clients cannot signal the dma
+		 * fence directly.
+		 */
 		synx_global_put_ref(idx);
 	}
 
@@ -573,6 +580,7 @@ fail:
 void synx_fence_callback(struct dma_fence *fence,
 	struct dma_fence_cb *cb)
 {
+	s32 status;
 	struct synx_signal_cb *signal_cb =
 		container_of(cb, struct synx_signal_cb, fence_cb);
 
@@ -581,7 +589,19 @@ void synx_fence_callback(struct dma_fence *fence,
 		fence, signal_cb->handle);
 
 	/* other signal_cb members would be set during cb registration */
-	signal_cb->status = dma_fence_get_status_locked(fence);
+	status = dma_fence_get_status_locked(fence);
+
+	/*
+	 * dma_fence_get_status_locked API returns 1 if signaled,
+	 * 0 if ACTIVE,
+	 * and negative error code in case of any failure
+	 */
+	if (status == 1)
+		status = SYNX_STATE_SIGNALED_SUCCESS;
+	else if (status < 0)
+		status = SYNX_STATE_SIGNALED_EXTERNAL;
+
+	signal_cb->status = status;
 
 	INIT_WORK(&signal_cb->cb_dispatch, synx_signal_handler);
 	queue_work(synx_dev->wq_cb, &signal_cb->cb_dispatch);
@@ -2427,7 +2447,7 @@ int synx_ipc_callback(u32 client_id,
 	if (IS_ERR_OR_NULL(signal_cb))
 		return -SYNX_NOMEM;
 
-	dprintk(SYNX_INFO,
+	dprintk(SYNX_DBG,
 		"signal notification for %u received with status %u\n",
 		handle, status);
 

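For reference, the new status handling in synx_fence_callback() boils down to a small mapping from dma-fence status to synx state. A minimal sketch with a hypothetical helper synx_map_fence_status(), assuming the SYNX_STATE_* values come from synx_global.h and that the caller holds fence->lock as the _locked variant requires:

#include <linux/dma-fence.h>
#include "synx_global.h"

static s32 synx_map_fence_status(struct dma_fence *fence)
{
	/*
	 * dma_fence_get_status_locked() returns 1 if the fence signaled
	 * without error, 0 if it is still active, and a negative error
	 * code if it signaled with an error set.
	 */
	s32 status = dma_fence_get_status_locked(fence);

	if (status == 1)
		return SYNX_STATE_SIGNALED_SUCCESS;
	if (status < 0)
		return SYNX_STATE_SIGNALED_EXTERNAL;

	return status;	/* still active, leave as-is */
}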
+ 5 - 1
msm/synx/synx_api.h

@@ -12,6 +12,8 @@
 
 #include "synx_err.h"
 
+#define SYNX_NO_TIMEOUT        ((u64)-1)
+
 /**
  * enum synx_create_flags - Flags passed during synx_create call
  *
@@ -87,7 +89,7 @@ typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
  * synx_user_callback - Callback function registered by clients
  *
  * User callback registered for non-blocking wait. Dispatched when
- * synx object is signaled.
+ * synx object is signaled or timeout has expired.
  */
 typedef void (*synx_user_callback_t)(u32 h_synx, int status, void *data);
 
@@ -330,12 +332,14 @@ struct synx_import_params {
  * @cb_func        : Pointer to callback func to be invoked
  * @userdata       : Opaque pointer passed back with callback
  * @cancel_cb_func : Pointer to callback to ack cancellation (optional)
+ * @timeout_ms     : Timeout in ms. SYNX_NO_TIMEOUT if no timeout.
  */
 struct synx_callback_params {
 	u32 h_synx;
 	synx_user_callback_t cb_func;
 	void *userdata;
 	synx_user_callback_t cancel_cb_func;
+	u64 timeout_ms;
 };
 
 /* Kernel APIs */
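To show the API change in use, here is a hedged sketch of registering a non-blocking wait with the new timeout_ms field. The synx_async_wait() call and the session argument are assumed from the surrounding synx kernel API; demo_cb() and demo_register_wait() are hypothetical names:

#include "synx_api.h"

static void demo_cb(u32 h_synx, int status, void *data)
{
	/* dispatched when the object is signaled or the timeout expires */
}

static int demo_register_wait(struct synx_session *session, u32 h_synx)
{
	struct synx_callback_params params = {
		.h_synx = h_synx,
		.cb_func = demo_cb,
		.userdata = NULL,
		.cancel_cb_func = NULL,
		/* bounded wait; use SYNX_NO_TIMEOUT to wait indefinitely */
		.timeout_ms = 5000,
	};

	return synx_async_wait(session, &params);
}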

+ 53 - 2
msm/synx/synx_global.c

@@ -208,6 +208,24 @@ int synx_global_init_coredata(u32 h_synx)
 	if (rc)
 		return rc;
 	synx_g_obj = &synx_gmem.table[idx];
+	if (synx_g_obj->status != 0 || synx_g_obj->refcount != 0 ||
+		synx_g_obj->subscribers != 0 || synx_g_obj->handle != 0 ||
+		synx_g_obj->parents[0] != 0) {
+		dprintk(SYNX_ERR,
+				"entry not cleared for idx %u,\n"
+				"synx_g_obj->status %d,\n"
+				"synx_g_obj->refcount %d,\n"
+				"synx_g_obj->subscribers %d,\n"
+				"synx_g_obj->handle %u,\n"
+				"synx_g_obj->parents[0] %d\n",
+				idx, synx_g_obj->status,
+				synx_g_obj->refcount,
+				synx_g_obj->subscribers,
+				synx_g_obj->handle,
+				synx_g_obj->parents[0]);
+		synx_gmem_unlock(idx, &flags);
+		return -SYNX_INVALID;
+	}
 	memset(synx_g_obj, 0, sizeof(*synx_g_obj));
 	/* set status to active */
 	synx_g_obj->status = SYNX_STATE_ACTIVE;
@@ -332,6 +350,28 @@ int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
 	return SYNX_SUCCESS;
 }
 
+int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id)
+{
+	int rc;
+	unsigned long flags;
+	struct synx_global_coredata *synx_g_obj;
+
+	if (!synx_gmem.table)
+		return -SYNX_NOMEM;
+
+	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
+		return -SYNX_INVALID;
+
+	rc = synx_gmem_lock(idx, &flags);
+	if (rc)
+		return rc;
+	synx_g_obj = &synx_gmem.table[idx];
+	synx_g_obj->subscribers &= ~(1UL << id);
+	synx_gmem_unlock(idx, &flags);
+
+	return SYNX_SUCCESS;
+}
+
 u32 synx_global_get_parents_num(u32 idx)
 {
 	int rc;
@@ -528,7 +568,18 @@ static int synx_global_update_status_core(u32 idx,
 	/* notify waiting clients on signal */
 	if (data) {
 		/* notify wait client */
-		for (i = 1; i < SYNX_CORE_MAX; i++) {
+
+		/*
+		 * In case of SSR, someone might be waiting on the same core.
+		 * In other cases the synx_signal API takes care of signaling
+		 * handles on the same core, so we don't need to send an
+		 * interrupt.
+		 */
+		if (status == SYNX_STATE_SIGNALED_SSR)
+			i = 0;
+		else
+			i = 1;
+
+		for (; i < SYNX_CORE_MAX; i++) {
 			if (!wait_cores[i])
 				continue;
 			dprintk(SYNX_DBG,
@@ -735,7 +786,7 @@ int synx_global_recover(enum synx_core_id core_id)
 	const u32 size = SYNX_GLOBAL_MAX_OBJS;
 	unsigned long flags;
 	struct synx_global_coredata *synx_g_obj;
-	
+
 	bool update;
 	int *clear_idx = NULL;
 	if (!synx_gmem.table)

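The loop-start change above reads as follows: on SSR-driven signaling, notify every subscribed core including the local one (index 0); otherwise skip the local core, whose handles the synx_signal path already covers. A condensed sketch of just that control flow, with a hypothetical demo_send_ipc_interrupt() standing in for the real per-core notification:

#include "synx_global.h"

static void demo_send_ipc_interrupt(enum synx_core_id core)
{
	/* placeholder for the per-core signal notification */
}

static void demo_notify_waiters(u32 status, const bool wait_cores[SYNX_CORE_MAX])
{
	/* start from core 0 only for SSR, where local waiters may exist */
	u32 i = (status == SYNX_STATE_SIGNALED_SSR) ? 0 : 1;

	for (; i < SYNX_CORE_MAX; i++) {
		if (wait_cores[i])
			demo_send_ipc_interrupt((enum synx_core_id)i);
	}
}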
+ 10 - 0
msm/synx/synx_global.h

@@ -179,6 +179,16 @@ int synx_global_get_subscribed_cores(u32 idx, bool *cores);
  */
 int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id);
 
+/**
+ * synx_global_clear_subscribed_core - Clear core as a subscriber on global entry
+ *
+ * @param idx : Global entry index
+ * @param id  : Core to be removed as subscriber
+ *
+ * @return SYNX_SUCCESS on success. Negative error on failure.
+ */
+int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id);
+
 /**
  * synx_global_get_status - Get status of the global entry
  *

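The new helper is the inverse of synx_global_set_subscribed_core(): the subscribers field is a per-core bitmask, set on subscribe and now cleared on release. A minimal sketch of the pairing, assuming the set path mirrors the clear with |=; subscribers here is a stand-in for the field in struct synx_global_coredata, with locking omitted:

#include "synx_global.h"

static u32 subscribers;	/* stand-in for synx_g_obj->subscribers */

static void demo_subscribe(enum synx_core_id id)
{
	subscribers |= (1UL << id);	/* synx_global_set_subscribed_core() */
}

static void demo_unsubscribe(enum synx_core_id id)
{
	subscribers &= ~(1UL << id);	/* synx_global_clear_subscribed_core() */
}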
+ 32 - 3
msm/synx/synx_util.c

@@ -223,11 +223,17 @@ int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
 
 static void synx_util_destroy_coredata(struct kref *kref)
 {
+	int rc;
 	struct synx_coredata *synx_obj =
 		container_of(kref, struct synx_coredata, refcount);
 
-	if (synx_util_is_global_object(synx_obj))
+	if (synx_util_is_global_object(synx_obj)) {
+		rc = synx_global_clear_subscribed_core(synx_obj->global_idx, SYNX_CORE_APSS);
+		if (rc)
+			dprintk(SYNX_ERR, "Failed to clear subscribers\n");
+
 		synx_global_put_ref(synx_obj->global_idx);
+	}
 	synx_util_object_destroy(synx_obj);
 }
 
@@ -247,6 +253,7 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj)
 	u32 i;
 	s32 sync_id;
 	u32 type;
+	unsigned long flags;
 	struct synx_cb_data *synx_cb, *synx_cb_temp;
 	struct synx_bind_desc *bind_desc;
 	struct bind_operations *bind_ops;
@@ -297,6 +304,29 @@ void synx_util_object_destroy(struct synx_coredata *synx_obj)
 
 	mutex_destroy(&synx_obj->obj_lock);
 	synx_util_release_fence_entry((u64)synx_obj->fence);
+
+	/*
+	 * The dma fence framework expects fences to be signaled before
+	 * release, so signal here if the handle is still active and we
+	 * hold the last refcount. Synx handles on other cores remain
+	 * active and carry out the usual callflow.
+	 */
+	if (!IS_ERR_OR_NULL(synx_obj->fence)) {
+		spin_lock_irqsave(synx_obj->fence->lock, flags);
+		if (kref_read(&synx_obj->fence->refcount) == 1 &&
+				(synx_util_get_object_status_locked(synx_obj) ==
+				SYNX_STATE_ACTIVE)) {
+			/* set fence error to cancel */
+			dma_fence_set_error(synx_obj->fence,
+				-SYNX_STATE_SIGNALED_CANCEL);
+
+			rc = dma_fence_signal_locked(synx_obj->fence);
+			if (rc)
+				dprintk(SYNX_ERR,
+					"signaling fence %pK failed=%d\n",
+					synx_obj->fence, rc);
+		}
+		spin_unlock_irqrestore(synx_obj->fence->lock, flags);
+	}
+
 	dma_fence_put(synx_obj->fence);
 	kfree(synx_obj);
 	dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
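The signal-before-release block above can be read in isolation: per the comment in the diff, the dma fence framework expects fences to be signaled before release, so the last holder cancels an otherwise-active fence first. A minimal sketch of the pattern; demo_release_fence() is hypothetical, and the error value and logging follow the driver code above:

#include <linux/dma-fence.h>
#include "synx_global.h"

static void demo_release_fence(struct dma_fence *fence)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(fence->lock, flags);
	/* force-signal only when unsignaled and we hold the last ref */
	if (kref_read(&fence->refcount) == 1 &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		dma_fence_set_error(fence, -SYNX_STATE_SIGNALED_CANCEL);
		rc = dma_fence_signal_locked(fence);
		if (rc)
			pr_err("signaling fence %pK failed=%d\n", fence, rc);
	}
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_put(fence);
}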
@@ -873,7 +903,6 @@ static void synx_util_cleanup_fence(
 			if (synx_util_get_object_status_locked(synx_obj) ==
 				SYNX_STATE_ACTIVE) {
 				signal_cb->synx_obj = NULL;
-				signal_cb->handle = synx_obj->global_idx;
 				synx_obj->signal_cb =  NULL;
 				/*
 				 * release reference held by signal cb and
@@ -1162,7 +1191,7 @@ void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
 				client->id);
 	}
 
-	dprintk(SYNX_INFO,
+	dprintk(SYNX_DBG,
 		"callback dispatched for handle %u, status %u, data %pK\n",
 		payload.h_synx, payload.status, payload.data);