qcacmn: Optimize the spin lock usage in mgmt rx reo release function

Optimize the spin lock usage in the API that releases
frames to the upper layer. The spin lock protecting the
reorder list is now released as soon as the frame at the
head of the list is removed, which shortens the contention
window for other users of this lock. A new spin lock is
added to ensure that frame delivery happens sequentially.

CRs-Fixed: 3062471
Change-Id: Ibabe6bc965febe28a2182c7235ab2d67232e24e6
Edayilliam Jayadev 3 years ago
parent
commit
2a905d2ecb

+ 61 - 37
umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c

@@ -771,16 +771,15 @@ mgmt_rx_reo_list_entry_get_release_reason(
  * for entries which can be released to upper layer. It is the caller's
  * responsibility to ensure that entry can be released (by using API
  * mgmt_rx_reo_list_is_ready_to_send_up_entry). This API is called after
- * acquiring the lock which protects the reorder list.
+ * acquiring the lock which serializes the frame delivery to the upper layers.
  *
  * Return: QDF_STATUS
  */
 static QDF_STATUS
-mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
+mgmt_rx_reo_list_entry_send_up(const struct mgmt_rx_reo_list *reo_list,
 			       struct mgmt_rx_reo_list_entry *entry)
 {
 	uint8_t release_reason;
-	QDF_STATUS status;
 	uint8_t link_id;
 	struct wlan_objmgr_pdev *pdev;
 	uint32_t entry_global_ts;
@@ -789,6 +788,7 @@ mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
 	qdf_assert_always(reo_list);
 	qdf_assert_always(entry);
 
+	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
 	entry_global_ts = mgmt_rx_reo_get_global_ts(entry->rx_params);
 	ts_last_delivered_frame = &reo_list->ts_last_delivered_frame;
 
@@ -797,14 +797,6 @@ mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_list *reo_list,
 
 	qdf_assert_always(release_reason != 0);
 
-	status = qdf_list_remove_node(&reo_list->list, &entry->node);
-	if (QDF_IS_STATUS_ERROR(status)) {
-		mgmt_rx_reo_err("Failed to remove entry %pK from list", entry);
-		qdf_assert_always(0);
-	}
-
-	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
-
 	/**
 	 * Last delivered frame global time stamp is invalid means that
 	 * current frame is the first frame to be delivered to the upper layer
@@ -874,7 +866,7 @@ mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
 
 /**
  * mgmt_rx_reo_list_release_entries() - Release entries from the reorder list
- * @reo_list: Pointer to reorder list
+ * @reo_context: Pointer to management Rx reorder context
  *
  * This API releases the entries from the reorder list based on the following
  * conditions.
@@ -888,41 +880,70 @@ mgmt_rx_reo_list_is_ready_to_send_up_entry(struct mgmt_rx_reo_list *reo_list,
  * Return: QDF_STATUS
  */
 static QDF_STATUS
-mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_list *reo_list)
+mgmt_rx_reo_list_release_entries(struct mgmt_rx_reo_context *reo_context)
 {
-	struct mgmt_rx_reo_list_entry *cur_entry;
-	struct mgmt_rx_reo_list_entry *temp;
-	/* TODO yield if release_count > THRESHOLD */
-	uint16_t release_count = 0;
+	struct mgmt_rx_reo_list *reo_list;
 	QDF_STATUS status;
 
-	if (!reo_list) {
-		mgmt_rx_reo_err("reo list is null");
+	if (!reo_context) {
+		mgmt_rx_reo_err("reo context is null");
 		return QDF_STATUS_E_NULL_VALUE;
 	}
 
-	qdf_spin_lock_bh(&reo_list->list_lock);
+	reo_list = &reo_context->reo_list;
 
-	qdf_list_for_each_del(&reo_list->list, cur_entry, temp, node) {
-		if (mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
-							       cur_entry)) {
-			mgmt_rx_reo_debug("Freeing up entry %pK", cur_entry);
-			status = mgmt_rx_reo_list_entry_send_up(reo_list,
-								cur_entry);
-			if (QDF_IS_STATUS_ERROR(status))
-				goto error;
+	qdf_spin_lock(&reo_context->frame_release_lock);
 
-			release_count++;
-		} else {
-			break;
+	while (1) {
+		struct mgmt_rx_reo_list_entry *first_entry;
+		/* TODO yield if release_count > THRESHOLD */
+		uint16_t release_count = 0;
+
+		qdf_spin_lock_bh(&reo_list->list_lock);
+
+		first_entry = qdf_list_first_entry_or_null(
+			&reo_list->list, struct mgmt_rx_reo_list_entry, node);
+
+		if (!first_entry) {
+			status = QDF_STATUS_SUCCESS;
+			goto exit_unlock_list_lock;
 		}
 
-		qdf_mem_free(cur_entry);
+		if (!mgmt_rx_reo_list_is_ready_to_send_up_entry(reo_list,
+								first_entry)) {
+			status = QDF_STATUS_SUCCESS;
+			goto exit_unlock_list_lock;
+		}
+
+		status = qdf_list_remove_node(&reo_list->list,
+					      &first_entry->node);
+		if (QDF_IS_STATUS_ERROR(status)) {
+			status = QDF_STATUS_E_FAILURE;
+			goto exit_unlock_list_lock;
+		}
+
+		qdf_spin_unlock_bh(&reo_list->list_lock);
+
+		status = mgmt_rx_reo_list_entry_send_up(reo_list,
+							first_entry);
+		if (QDF_IS_STATUS_ERROR(status)) {
+			status = QDF_STATUS_E_FAILURE;
+			qdf_mem_free(first_entry);
+			goto exit_unlock_frame_release_lock;
+		}
+
+		qdf_mem_free(first_entry);
+		release_count++;
 	}
 
 	status = QDF_STATUS_SUCCESS;
-error:
+	goto exit_unlock_frame_release_lock;
+
+exit_unlock_list_lock:
 	qdf_spin_unlock_bh(&reo_list->list_lock);
+exit_unlock_frame_release_lock:
+	qdf_spin_unlock(&reo_context->frame_release_lock);
+
 	return status;
 }
 
@@ -942,9 +963,12 @@ mgmt_rx_reo_list_ageout_timer_handler(void *arg)
 	struct mgmt_rx_reo_list_entry *cur_entry;
 	uint64_t cur_ts;
 	QDF_STATUS status;
+	struct mgmt_rx_reo_context *reo_context;
 
-	if (!reo_list)
-		return;
+	qdf_assert_always(reo_list);
+
+	reo_context = mgmt_rx_reo_get_context_from_reo_list(reo_list);
+	qdf_assert_always(reo_context);
 
 	qdf_spin_lock_bh(&reo_list->list_lock);
 
@@ -973,7 +997,7 @@ mgmt_rx_reo_list_ageout_timer_handler(void *arg)
 
 	qdf_spin_unlock_bh(&reo_list->list_lock);
 
-	status = mgmt_rx_reo_list_release_entries(reo_list);
+	status = mgmt_rx_reo_list_release_entries(reo_context);
 	if (QDF_IS_STATUS_ERROR(status)) {
 		mgmt_rx_reo_err("Failed to release list entries, status = %d",
 				status);
@@ -1563,7 +1587,7 @@ wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
 	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
 
 	/* Finally, release the entries for which pending frame is received */
-	return mgmt_rx_reo_list_release_entries(&reo_ctx->reo_list);
+	return mgmt_rx_reo_list_release_entries(reo_ctx);
 }
 
 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT

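To make the new locking discipline easier to see outside the diff, here is a minimal sketch of the same pattern using POSIX threads in place of the qdf_* wrappers. Every name in it (frame_node, frame_list, list_lock, delivery_lock, deliver_frame, release_frames) is illustrative, not part of the driver; only the structure mirrors the reworked mgmt_rx_reo_list_release_entries: take the delivery lock for the whole pass, hold the list lock just long enough to detach the head entry, and deliver with the list lock dropped.

#include <pthread.h>
#include <stdlib.h>

/* Illustrative stand-in for struct mgmt_rx_reo_list_entry. */
struct frame_node {
	struct frame_node *next;
	/* frame payload omitted */
};

static pthread_spinlock_t list_lock;   /* protects frame_list only */
static pthread_mutex_t delivery_lock = PTHREAD_MUTEX_INITIALIZER;
static struct frame_node *frame_list;  /* singly linked reorder list */

/* Stand-in for delivery to the upper layer. */
static void deliver_frame(struct frame_node *frame)
{
	(void)frame;
}

static void release_frames(void)
{
	/* Serializes delivery order across CPUs, like frame_release_lock. */
	pthread_mutex_lock(&delivery_lock);

	for (;;) {
		struct frame_node *head;

		/* Hold the list lock only long enough to detach the head. */
		pthread_spin_lock(&list_lock);
		head = frame_list;
		if (!head) {
			pthread_spin_unlock(&list_lock);
			break;
		}
		frame_list = head->next;
		pthread_spin_unlock(&list_lock);

		/*
		 * Delivery happens with the list lock dropped, so other
		 * CPUs can keep inserting into the list; delivery_lock
		 * still guarantees frames go up one at a time, in order.
		 */
		deliver_frame(head);
		free(head);
	}

	pthread_mutex_unlock(&delivery_lock);
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	release_frames();              /* empty list: returns immediately */
	pthread_spin_destroy(&list_lock);
	return 0;
}

Keeping delivery outside the list lock is what shortens the contention window for producers inserting into the reorder list, while the outer lock preserves strict delivery order across CPUs.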
+ 28 - 0
umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo_i.h

@@ -422,6 +422,18 @@ struct mgmt_rx_reo_egress_frame_debug_info {
  * @reo_list: Linked list used for reordering
  * @reo_algo_entry_lock: Spin lock to protect reo algorithm entry critical
  * section execution
+ * @frame_release_lock: Spin lock to serialize the frame delivery to the
+ * upper layers. This prevents race conditions like the one in the
+ * following example.
+ * Take 2 links (Link A and B), each of which has received a
+ * management frame, A1 (deauth) and B1 (auth), such that the MLO
+ * global time stamp of A1 < the MLO global time stamp of B1. The host
+ * concurrently executes "mgmt_rx_reo_list_release_entries" for A1 and
+ * B1 on 2 different CPUs. Frame B1 could then get processed by the
+ * upper layers before frame A1, resulting in an unwanted
+ * disconnection. Hence the delivery of management frames to the upper
+ * layers must be serialized in the strict order of the MLO global
+ * time stamp.
  * @num_mlo_links: Number of MLO links on the system
  * @sim_context: Management rx-reorder simulation context
  * @ingress_frame_debug_info: Debug object to log incoming frames
@@ -430,6 +442,7 @@ struct mgmt_rx_reo_egress_frame_debug_info {
 struct mgmt_rx_reo_context {
 	struct mgmt_rx_reo_list reo_list;
 	qdf_spinlock_t reo_algo_entry_lock;
+	qdf_spinlock_t frame_release_lock;
 #ifndef WLAN_MGMT_RX_REO_SIM_SUPPORT
 	uint8_t num_mlo_links;
 #else
@@ -456,6 +469,21 @@ struct mgmt_rx_reo_frame_descriptor {
 	struct mgmt_rx_reo_wait_count wait_count;
 };
 
+/**
+ * mgmt_rx_reo_get_context_from_reo_list() - Helper API to get pointer to
+ * management rx reorder context from pointer to management reorder list
+ * @reo_list: Pointer to management rx reorder list
+ *
+ * Return: Pointer to management rx reorder context
+ */
+static inline struct mgmt_rx_reo_context *
+mgmt_rx_reo_get_context_from_reo_list(struct mgmt_rx_reo_list *reo_list) {
+	qdf_assert_always(reo_list);
+
+	return qdf_container_of(reo_list, struct mgmt_rx_reo_context,
+				reo_list);
+}
+
 /**
  * mgmt_rx_reo_get_global_ts() - Helper API to get global time stamp
  * corresponding to the mgmt rx event
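
The qdf_container_of used by mgmt_rx_reo_get_context_from_reo_list follows the classic container_of idiom: given a pointer to a member embedded in a larger structure, subtract the member's offset to recover the enclosing structure. A standalone illustration with hypothetical outer/inner types (none of these names are from the driver):

#include <stddef.h>
#include <stdio.h>

/* Classic container_of: subtract the member's offset from its address. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {
	int x;
};

/*
 * Hypothetical enclosing struct; reo_list is embedded in
 * mgmt_rx_reo_context the same way "member" is embedded here.
 */
struct outer {
	int id;
	struct inner member;
};

int main(void)
{
	struct outer object = { .id = 42 };
	struct inner *member_ptr = &object.member;

	/* Recover the enclosing object from the embedded member pointer. */
	struct outer *recovered =
		container_of(member_ptr, struct outer, member);

	printf("%d\n", recovered->id);  /* prints 42 */
	return 0;
}

This is why the helper can turn the reo_list pointer handed to the ageout timer back into its owning mgmt_rx_reo_context without storing a back-pointer.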