Selaa lähdekoodia

qcacmn: Enable management rx reo scheduler

Enable management Rx reorder scheduler.

CRs-Fixed: 3410648
Change-Id: I90fad2bb3c118f3d9021dc94ecdd1a300c9ab00c
Edayilliam Jayadev 2 vuotta sitten
vanhempi
sitoutus
4855c6ef1a

+ 11 - 1
target_if/mgmt_txrx/inc/target_if_mgmt_txrx_rx_reo.h

@@ -129,7 +129,6 @@ target_if_mgmt_rx_reo_extract_reo_params(wmi_unified_t wmi_handle,
 QDF_STATUS
 target_if_mgmt_rx_reo_host_drop_handler(struct wlan_objmgr_pdev *pdev,
 					struct mgmt_rx_event_params *params);
-
 /**
  * target_if_mgmt_rx_reo_release_frames() - API to release the management
  * frames of a given psoc
@@ -212,5 +211,16 @@ target_if_mgmt_rx_reo_host_drop_handler(struct wlan_objmgr_pdev *pdev,
 	/* Nothing to do when REO is compiled off */
 	return QDF_STATUS_SUCCESS;
 }
+
+/**
+ * target_if_mgmt_rx_reo_release_frames() - API to release the management
+ * frames of a given psoc
+ * @arg: Pointer to void * argument
+ *
+ * Return: void
+ */
+static inline void target_if_mgmt_rx_reo_release_frames(void *arg)
+{
+	/* No-op stub: WLAN_MGMT_RX_REO_SUPPORT is compiled out, so there
+	 * are no reordered frames to release.
+	 */
+}
 #endif /* WLAN_MGMT_RX_REO_SUPPORT */
 #endif /* _TARGET_IF_MGMT_TXRX_RX_REO_H_ */

+ 60 - 40
target_if/mgmt_txrx/src/target_if_mgmt_txrx_rx_reo.c

@@ -99,6 +99,32 @@ target_if_mgmt_rx_reo_fw_consumed_event_handler(
 	return 0;
 }
 
+/* Scheduler callback: resolve the psoc from the opaque scn handle and ask
+ * the MGMT Rx REO module (via its registered rx_ops) to release all frames
+ * that are ready for delivery on that psoc. Errors are logged and swallowed
+ * because the HTC custom-callback caller cannot act on a return value.
+ */
+void target_if_mgmt_rx_reo_release_frames(void *arg)
+{
+	ol_scn_t scn = arg;	/* opaque handle supplied by the HTC layer */
+	struct wlan_objmgr_psoc *psoc;
+	struct wlan_lmac_if_mgmt_rx_reo_rx_ops *mgmt_rx_reo_rx_ops;
+	QDF_STATUS status;
+
+	psoc = target_if_get_psoc_from_scn_hdl(scn);
+	if (!psoc) {
+		mgmt_rx_reo_err("null psoc");
+		return;
+	}
+
+	mgmt_rx_reo_rx_ops = target_if_mgmt_rx_reo_get_rx_ops(psoc);
+	if (!mgmt_rx_reo_rx_ops) {
+		mgmt_rx_reo_err("rx_ops of MGMT Rx REO module is NULL");
+		return;
+	}
+
+	status = mgmt_rx_reo_rx_ops->release_frames(psoc);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		mgmt_rx_reo_err("Failed to release entries, ret = %d", status);
+		/* NOTE(review): trailing return is redundant at end of a
+		 * void function; kept for byte-identical patch content.
+		 */
+		return;
+	}
+}
+
 QDF_STATUS
 target_if_mgmt_rx_reo_register_event_handlers(struct wlan_objmgr_psoc *psoc)
 {
@@ -619,14 +645,10 @@ target_if_mgmt_rx_reo_extract_reo_params(
 static QDF_STATUS
 target_if_mgmt_rx_reo_schedule_delivery(struct wlan_objmgr_psoc *psoc)
 {
-	struct hif_opaque_softc *hif_handle;
 	struct wmi_unified *wmi_handle;
-
-	hif_handle = lmac_get_hif_hdl(psoc);
-	if (!hif_handle) {
-		mgmt_rx_reo_err("HIF handle is null");
-		return QDF_STATUS_E_NULL_VALUE;
-	}
+	QDF_STATUS status;
+	HTC_ENDPOINT_ID wmi_endpoint_id;
+	HTC_HANDLE htc_handle;
 
 	wmi_handle = get_wmi_unified_hdl_from_psoc(psoc);
 	if (!wmi_handle) {
@@ -634,6 +656,20 @@ target_if_mgmt_rx_reo_schedule_delivery(struct wlan_objmgr_psoc *psoc)
 		return QDF_STATUS_E_NULL_VALUE;
 	}
 
+	htc_handle = lmac_get_htc_hdl(psoc);
+	if (!htc_handle) {
+		mgmt_rx_reo_err("HTC_handle is NULL");
+		return QDF_STATUS_E_NULL_VALUE;
+	}
+
+	wmi_endpoint_id = wmi_get_endpoint(wmi_handle);
+
+	status = htc_enable_custom_cb(htc_handle, wmi_endpoint_id);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		mgmt_rx_reo_err("Failed to schedule delivery");
+		return status;
+	}
+
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -647,14 +683,10 @@ target_if_mgmt_rx_reo_schedule_delivery(struct wlan_objmgr_psoc *psoc)
 static QDF_STATUS
 target_if_mgmt_rx_reo_cancel_scheduled_delivery(struct wlan_objmgr_psoc *psoc)
 {
-	struct hif_opaque_softc *hif_handle;
 	struct wmi_unified *wmi_handle;
-
-	hif_handle = lmac_get_hif_hdl(psoc);
-	if (!hif_handle) {
-		mgmt_rx_reo_err("HIF handle is null");
-		return QDF_STATUS_E_NULL_VALUE;
-	}
+	QDF_STATUS status;
+	HTC_ENDPOINT_ID wmi_endpoint_id;
+	HTC_HANDLE htc_handle;
 
 	wmi_handle = get_wmi_unified_hdl_from_psoc(psoc);
 	if (!wmi_handle) {
@@ -662,6 +694,20 @@ target_if_mgmt_rx_reo_cancel_scheduled_delivery(struct wlan_objmgr_psoc *psoc)
 		return QDF_STATUS_E_NULL_VALUE;
 	}
 
+	htc_handle = lmac_get_htc_hdl(psoc);
+	if (!htc_handle) {
+		mgmt_rx_reo_err("HTC_handle is NULL");
+		return QDF_STATUS_E_NULL_VALUE;
+	}
+
+	wmi_endpoint_id = wmi_get_endpoint(wmi_handle);
+
+	status = htc_disable_custom_cb(htc_handle, wmi_endpoint_id);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		mgmt_rx_reo_err("Failed to cancel scheduled delivery");
+		return status;
+	}
+
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -719,29 +765,3 @@ target_if_mgmt_rx_reo_host_drop_handler(struct wlan_objmgr_pdev *pdev,
 
 	return mgmt_rx_reo_rx_ops->host_drop_handler(pdev, params->reo_params);
 }
-
-void target_if_mgmt_rx_reo_release_frames(void *arg)
-{
-	ol_scn_t scn = arg;
-	struct wlan_objmgr_psoc *psoc;
-	struct wlan_lmac_if_mgmt_rx_reo_rx_ops *mgmt_rx_reo_rx_ops;
-	QDF_STATUS status;
-
-	psoc = target_if_get_psoc_from_scn_hdl(scn);
-	if (!psoc) {
-		mgmt_rx_reo_err("null psoc");
-		return;
-	}
-
-	mgmt_rx_reo_rx_ops = target_if_mgmt_rx_reo_get_rx_ops(psoc);
-	if (!mgmt_rx_reo_rx_ops) {
-		mgmt_rx_reo_err("rx_ops of MGMT Rx REO module is NULL");
-		return;
-	}
-
-	status = mgmt_rx_reo_rx_ops->release_frames(psoc);
-	if (QDF_IS_STATUS_ERROR(status)) {
-		mgmt_rx_reo_err("Failed to release entries, ret = %d", status);
-		return;
-	}
-}

+ 432 - 80
umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo.c

@@ -170,6 +170,9 @@ mgmt_rx_reo_is_stale_frame(
 	frame_desc->is_parallel_rx = false;
 	frame_desc->last_delivered_frame = *last_delivered_frame;
 
+	if (!frame_desc->reo_required)
+		return QDF_STATUS_SUCCESS;
+
 	if (!last_delivered_frame->valid)
 		return QDF_STATUS_SUCCESS;
 
@@ -1541,6 +1544,120 @@ mgmt_rx_reo_egress_frame_debug_info_enabled
 	return egress_frame_debug_info->frame_list_size;
 }
 
+/**
+ * mgmt_rx_reo_debug_print_scheduler_stats() - API to print the stats
+ * related to frames getting scheduled by mgmt rx reo scheduler
+ * @reo_ctx: Pointer to reorder context
+ *
+ * API to print the stats related to frames getting scheduled by management
+ * Rx reorder scheduler.
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS
+mgmt_rx_reo_debug_print_scheduler_stats(struct mgmt_rx_reo_context *reo_ctx)
+{
+	struct reo_scheduler_stats *stats;
+	/* Row totals: one accumulator per MLO link */
+	uint64_t scheduled_count_per_link[MAX_MLO_LINKS] = {0};
+	/* Column totals: one accumulator per execution context */
+	uint64_t scheduled_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
+	uint64_t total_scheduled_count = 0;
+	uint64_t rescheduled_count_per_link[MAX_MLO_LINKS] = {0};
+	uint64_t rescheduled_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
+	uint64_t total_rescheduled_count = 0;
+	uint64_t total_scheduler_cb_count = 0;
+	uint8_t link_id;
+	uint8_t ctx;
+
+	if (!reo_ctx)
+		return QDF_STATUS_E_NULL_VALUE;
+
+	stats = &reo_ctx->scheduler_debug_info.stats;
+
+	/* Pass 1: aggregate per-link totals across all execution contexts */
+	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
+		for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++) {
+			scheduled_count_per_link[link_id] +=
+				stats->scheduled_count[link_id][ctx];
+			rescheduled_count_per_link[link_id] +=
+				stats->rescheduled_count[link_id][ctx];
+		}
+
+		total_scheduled_count += scheduled_count_per_link[link_id];
+		total_rescheduled_count += rescheduled_count_per_link[link_id];
+		total_scheduler_cb_count += stats->scheduler_cb_count[link_id];
+	}
+
+	/* Pass 2: aggregate per-context totals across all links */
+	for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++) {
+		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
+			scheduled_count_per_context[ctx] +=
+				stats->scheduled_count[link_id][ctx];
+			rescheduled_count_per_context[ctx] +=
+				stats->rescheduled_count[link_id][ctx];
+		}
+	}
+
+	/* Table 1: scheduled counts, links as rows, contexts as columns.
+	 * Column indices 0..2 map to the MGMT_RX_REO_CONTEXT_* enum below.
+	 */
+	mgmt_rx_reo_alert("Scheduler stats:");
+	mgmt_rx_reo_alert("\t1) Scheduled count");
+	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
+	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
+	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
+	mgmt_rx_reo_alert("\t------------------------------------");
+	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
+	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
+	mgmt_rx_reo_alert("\t-------------------------------------------");
+
+	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
+		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
+				  stats->scheduled_count[link_id][0],
+				  stats->scheduled_count[link_id][1],
+				  stats->scheduled_count[link_id][2],
+				  scheduled_count_per_link[link_id]);
+		mgmt_rx_reo_alert("\t-------------------------------------------");
+	}
+	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
+			  scheduled_count_per_context[0],
+			  scheduled_count_per_context[1],
+			  scheduled_count_per_context[2],
+			  total_scheduled_count);
+
+	/* Table 2: rescheduled counts, same layout as table 1 */
+	mgmt_rx_reo_alert("\t2) Rescheduled count");
+	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
+	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
+	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
+	mgmt_rx_reo_alert("\t------------------------------------");
+	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
+	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
+	mgmt_rx_reo_alert("\t-------------------------------------------");
+
+	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
+		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
+				  stats->rescheduled_count[link_id][0],
+				  stats->rescheduled_count[link_id][1],
+				  stats->rescheduled_count[link_id][2],
+				  rescheduled_count_per_link[link_id]);
+		mgmt_rx_reo_alert("\t-------------------------------------------");
+	}
+	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
+			  rescheduled_count_per_context[0],
+			  rescheduled_count_per_context[1],
+			  rescheduled_count_per_context[2],
+			  total_rescheduled_count);
+
+	/* Table 3: number of scheduler callback invocations per link */
+	mgmt_rx_reo_alert("\t3) Per link stats:");
+	mgmt_rx_reo_alert("\t----------------------");
+	mgmt_rx_reo_alert("\t|link id|Scheduler CB|");
+	mgmt_rx_reo_alert("\t|       |    Count   |");
+	mgmt_rx_reo_alert("\t----------------------");
+
+	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
+		mgmt_rx_reo_alert("\t|%7u|%12llu|", link_id,
+				  stats->scheduler_cb_count[link_id]);
+		mgmt_rx_reo_alert("\t----------------------");
+	}
+	mgmt_rx_reo_alert("\t%8s|%12llu|\n\n", "", total_scheduler_cb_count);
+
+	return QDF_STATUS_SUCCESS;
+}
+
 /**
  * mgmt_rx_reo_debug_print_egress_frame_stats() - API to print the stats
  * related to frames going out of the reorder module
@@ -1555,68 +1672,71 @@ static QDF_STATUS
 mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 {
 	struct reo_egress_frame_stats *stats;
-	struct reo_scheduler_stats *scheduler_stats;
 	uint8_t link_id;
 	uint8_t reason;
+	uint8_t ctx;
 	uint64_t total_delivery_attempts_count = 0;
 	uint64_t total_delivery_success_count = 0;
+	uint64_t total_drop_count = 0;
 	uint64_t total_premature_delivery_count = 0;
-	uint64_t total_scheduled_count = 0;
-	uint64_t total_rescheduled_count = 0;
 	uint64_t delivery_count_per_link[MAX_MLO_LINKS] = {0};
 	uint64_t delivery_count_per_reason[RELEASE_REASON_MAX] = {0};
+	uint64_t delivery_count_per_context[MGMT_RX_REO_CONTEXT_MAX] = {0};
 	uint64_t total_delivery_count = 0;
 	char delivery_reason_stats_boarder_a[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE + 1] = {0};
 	char delivery_reason_stats_boarder_b[MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_B_MAX_SIZE + 1] = {0};
+	QDF_STATUS status;
 
 	if (!reo_ctx)
 		return QDF_STATUS_E_NULL_VALUE;
 
 	stats = &reo_ctx->egress_frame_debug_info.stats;
-	scheduler_stats = &reo_ctx->scheduler_debug_info.stats;
 
 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
 		total_delivery_attempts_count +=
 				stats->delivery_attempts_count[link_id];
 		total_delivery_success_count +=
 				stats->delivery_success_count[link_id];
+		total_drop_count += stats->drop_count[link_id];
 		total_premature_delivery_count +=
 				stats->premature_delivery_count[link_id];
-		total_scheduled_count +=
-				scheduler_stats->scheduled_count[link_id];
-		total_rescheduled_count +=
-				scheduler_stats->rescheduled_count[link_id];
 	}
 
 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
 		for (reason = 0; reason < RELEASE_REASON_MAX;
 		     reason++)
 			delivery_count_per_link[link_id] +=
-				stats->delivery_count[link_id][reason];
+				stats->delivery_reason_count[link_id][reason];
 		total_delivery_count += delivery_count_per_link[link_id];
 	}
 	for (reason = 0; reason < RELEASE_REASON_MAX; reason++)
 		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
 			delivery_count_per_reason[reason] +=
-				stats->delivery_count[link_id][reason];
+				stats->delivery_reason_count[link_id][reason];
+	for (ctx = 0; ctx < MGMT_RX_REO_CONTEXT_MAX; ctx++)
+		for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++)
+			delivery_count_per_context[ctx] +=
+				stats->delivery_context_count[link_id][ctx];
 
 	mgmt_rx_reo_alert("Egress frame stats:");
 	mgmt_rx_reo_alert("\t1) Delivery related stats:");
-	mgmt_rx_reo_alert("\t------------------------------------------");
-	mgmt_rx_reo_alert("\t|link id   |Attempts |Success |Premature |");
-	mgmt_rx_reo_alert("\t|          | count   | count  | count    |");
-	mgmt_rx_reo_alert("\t------------------------------------------");
+	mgmt_rx_reo_alert("\t------------------------------------------------");
+	mgmt_rx_reo_alert("\t|link id  |Attempts|Success |Premature|Drop    |");
+	mgmt_rx_reo_alert("\t|         | count  | count  | count   |count   |");
+	mgmt_rx_reo_alert("\t------------------------------------------------");
 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
-		mgmt_rx_reo_alert("\t|%10u|%9llu|%8llu|%10llu|", link_id,
+		mgmt_rx_reo_alert("\t|%9u|%8llu|%8llu|%9llu|%8llu|", link_id,
 				  stats->delivery_attempts_count[link_id],
 				  stats->delivery_success_count[link_id],
-				  stats->premature_delivery_count[link_id]);
-	mgmt_rx_reo_alert("\t------------------------------------------");
+				  stats->premature_delivery_count[link_id],
+				  stats->drop_count[link_id]);
+		mgmt_rx_reo_alert("\t------------------------------------------------");
 	}
-	mgmt_rx_reo_alert("\t%11s|%9llu|%8llu|%10llu|\n\n", "",
+	mgmt_rx_reo_alert("\t%10s|%8llu|%8llu|%9llu|%8llu|\n\n", "",
 			  total_delivery_attempts_count,
 			  total_delivery_success_count,
-			  total_premature_delivery_count);
+			  total_premature_delivery_count,
+			  total_drop_count);
 
 	mgmt_rx_reo_alert("\t2) Delivery reason related stats");
 	mgmt_rx_reo_alert("\tRelease Reason Values:-");
@@ -1630,6 +1750,8 @@ mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 			  RELEASE_REASON_INGRESS_LIST_OVERFLOW);
 	mgmt_rx_reo_alert("\tREASON_OLDER_THAN_READY_TO_DELIVER_FRAMES - 0x%lx",
 			  RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES);
+	mgmt_rx_reo_alert("\tREASON_EGRESS_LIST_OVERFLOW - 0x%lx",
+			  RELEASE_REASON_EGRESS_LIST_OVERFLOW);
 
 	qdf_mem_set(delivery_reason_stats_boarder_a,
 		    MGMT_RX_REO_EGRESS_FRAME_DELIVERY_REASON_STATS_BOARDER_A_MAX_SIZE, '-');
@@ -1645,12 +1767,13 @@ mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 
 	for (reason = 0; reason < RELEASE_REASON_MAX; reason++) {
 		mgmt_rx_reo_alert("\t|%16x|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu|%7llu",
-				  reason, stats->delivery_count[0][reason],
-				  stats->delivery_count[1][reason],
-				  stats->delivery_count[2][reason],
-				  stats->delivery_count[3][reason],
-				  stats->delivery_count[4][reason],
-				  stats->delivery_count[5][reason],
+				  reason,
+				  stats->delivery_reason_count[0][reason],
+				  stats->delivery_reason_count[1][reason],
+				  stats->delivery_reason_count[2][reason],
+				  stats->delivery_reason_count[3][reason],
+				  stats->delivery_reason_count[4][reason],
+				  stats->delivery_reason_count[5][reason],
 				  delivery_count_per_reason[reason]);
 		mgmt_rx_reo_alert("\t%s", delivery_reason_stats_boarder_b);
 	}
@@ -1663,20 +1786,38 @@ mgmt_rx_reo_debug_print_egress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 			  delivery_count_per_link[5],
 			  total_delivery_count);
 
-	mgmt_rx_reo_alert("\t3) Scheduler stats:");
-	mgmt_rx_reo_alert("\t------------------------------------");
-	mgmt_rx_reo_alert("\t|link id   |Scheduled |Rescheduled |");
-	mgmt_rx_reo_alert("\t|          | count    | count      |");
+	mgmt_rx_reo_alert("\t3) Delivery context related stats");
+	mgmt_rx_reo_alert("\t\t0 - MGMT_RX_REO_CONTEXT_MGMT_RX");
+	mgmt_rx_reo_alert("\t\t1 - MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT");
+	mgmt_rx_reo_alert("\t\t2 - MGMT_RX_REO_CONTEXT_SCHEDULER_CB");
 	mgmt_rx_reo_alert("\t------------------------------------");
+	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
+	mgmt_rx_reo_alert("\t|context   |      0|      1|      2|");
+	mgmt_rx_reo_alert("\t-------------------------------------------");
+
 	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
-		mgmt_rx_reo_alert("\t|%10u|%10llu|%12llu|", link_id,
-				  scheduler_stats->scheduled_count[link_id],
-				  scheduler_stats->rescheduled_count[link_id]);
-	mgmt_rx_reo_alert("\t------------------------------------");
+		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
+				  stats->delivery_context_count[link_id][0],
+				  stats->delivery_context_count[link_id][1],
+				  stats->delivery_context_count[link_id][2],
+				  delivery_count_per_link[link_id]);
+		mgmt_rx_reo_alert("\t-------------------------------------------");
+	}
+	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
+			  delivery_count_per_context[0],
+			  delivery_count_per_context[1],
+			  delivery_count_per_context[2],
+			  total_delivery_count);
+
+	mgmt_rx_reo_alert("\t4) Misc stats:");
+	mgmt_rx_reo_alert("\t\tEgress list overflow count = %llu\n\n",
+			  reo_ctx->egress_list.reo_list.overflow_count);
+
+	status = mgmt_rx_reo_debug_print_scheduler_stats(reo_ctx);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		mgmt_rx_reo_err("Failed to print scheduler stats");
+		return status;
 	}
-	mgmt_rx_reo_alert("\t%11s|%10llu|%12llu|\n\n", "",
-			  total_scheduled_count,
-			  total_rescheduled_count);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -1742,6 +1883,12 @@ mgmt_rx_reo_log_egress_frame_before_delivery(
 	cur_frame_debug_info->egress_list_removal_ts =
 					entry->egress_list_removal_ts;
 	cur_frame_debug_info->egress_timestamp = qdf_get_log_timestamp();
+	cur_frame_debug_info->egress_list_size = entry->egress_list_size;
+	cur_frame_debug_info->first_scheduled_ts = entry->first_scheduled_ts;
+	cur_frame_debug_info->last_scheduled_ts = entry->last_scheduled_ts;
+	cur_frame_debug_info->scheduled_count =
+				qdf_atomic_read(&entry->scheduled_count);
+	cur_frame_debug_info->ctx_info = entry->ctx_info;
 	cur_frame_debug_info->release_reason = entry->release_reason;
 	cur_frame_debug_info->is_premature_delivery =
 						entry->is_premature_delivery;
@@ -1769,20 +1916,28 @@ mgmt_rx_reo_log_egress_frame_after_delivery(
 	struct reo_egress_debug_info *egress_frame_debug_info;
 	struct reo_egress_debug_frame_info *cur_frame_debug_info;
 	struct reo_egress_frame_stats *stats;
+	uint8_t context;
 
 	if (!reo_ctx || !entry)
 		return QDF_STATUS_E_NULL_VALUE;
 
 	egress_frame_debug_info = &reo_ctx->egress_frame_debug_info;
+	context = entry->ctx_info.context;
+	if (context >= MGMT_RX_REO_CONTEXT_MAX)
+		return QDF_STATUS_E_INVAL;
 
 	stats = &egress_frame_debug_info->stats;
 	if (entry->is_delivered) {
 		uint8_t release_reason = entry->release_reason;
 
-		stats->delivery_count[link_id][release_reason]++;
+		stats->delivery_reason_count[link_id][release_reason]++;
+		stats->delivery_context_count[link_id][context]++;
 		stats->delivery_success_count[link_id]++;
 	}
 
+	if (entry->is_dropped)
+		stats->drop_count[link_id]++;
+
 	if (!mgmt_rx_reo_egress_frame_debug_info_enabled
 						(egress_frame_debug_info))
 		return QDF_STATUS_SUCCESS;
@@ -1791,6 +1946,7 @@ mgmt_rx_reo_log_egress_frame_after_delivery(
 			[egress_frame_debug_info->next_index];
 
 	cur_frame_debug_info->is_delivered = entry->is_delivered;
+	cur_frame_debug_info->is_dropped = entry->is_dropped;
 	cur_frame_debug_info->egress_duration = qdf_get_log_timestamp() -
 					cur_frame_debug_info->egress_timestamp;
 
@@ -2088,6 +2244,9 @@ mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
 	if (LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry))
 		reason |= RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES;
 
+	if (LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(entry))
+		reason |= RELEASE_REASON_EGRESS_LIST_OVERFLOW;
+
 	return reason;
 }
 
@@ -2095,6 +2254,8 @@ mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
  * mgmt_rx_reo_list_entry_send_up() - API to send the frame to the upper layer.
  * @reo_context: Pointer to reorder context
  * @entry: List entry
+ * @deliver: Indicates whether this entry has to be delivered to upper layers
+ * or dropped in the reo layer itself.
  *
  * API to send the frame to the upper layer. This API has to be called only
  * for entries which can be released to upper layer. It is the caller's
@@ -2106,7 +2267,8 @@ mgmt_rx_reo_list_entry_get_release_reason(struct mgmt_rx_reo_list_entry *entry)
  */
 static QDF_STATUS
 mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_context *reo_context,
-			       struct mgmt_rx_reo_list_entry *entry)
+			       struct mgmt_rx_reo_list_entry *entry,
+			       bool deliver)
 {
 	uint8_t release_reason;
 	uint8_t link_id;
@@ -2125,6 +2287,7 @@ mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_context *reo_context,
 	qdf_assert_always(release_reason != 0);
 
 	entry->is_delivered = false;
+	entry->is_dropped = false;
 	entry->is_premature_delivery = false;
 	entry->release_reason = release_reason;
 
@@ -2141,16 +2304,23 @@ mgmt_rx_reo_list_entry_send_up(struct mgmt_rx_reo_context *reo_context,
 	if (QDF_IS_STATUS_ERROR(status))
 		goto exit;
 
-	status = wlan_mgmt_txrx_process_rx_frame(entry->pdev, entry->nbuf,
-						 entry->rx_params);
-	/* Above call frees nbuf and rx_params, make it null explicitly */
-	entry->nbuf = NULL;
-	entry->rx_params = NULL;
+	if (deliver) {
+		status = wlan_mgmt_txrx_process_rx_frame(entry->pdev,
+							 entry->nbuf,
+							 entry->rx_params);
+		/* Above call frees nbuf and rx_params, make them null */
+		entry->nbuf = NULL;
+		entry->rx_params = NULL;
 
-	if (QDF_IS_STATUS_ERROR(status))
-		goto exit_log;
+		if (QDF_IS_STATUS_ERROR(status))
+			goto exit_log;
 
-	entry->is_delivered = true;
+		entry->is_delivered = true;
+	} else {
+		free_mgmt_rx_event_params(entry->rx_params);
+		qdf_nbuf_free(entry->nbuf);
+		entry->is_dropped = true;
+	}
 
 	status = QDF_STATUS_SUCCESS;
 
@@ -2182,6 +2352,7 @@ mgmt_rx_reo_is_entry_ready_to_send_up(struct mgmt_rx_reo_list_entry *entry)
 	qdf_assert_always(entry);
 
 	return LIST_ENTRY_IS_REMOVED_DUE_TO_INGRESS_LIST_OVERFLOW(entry) ||
+	       LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(entry) ||
 	       !LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry) ||
 	       LIST_ENTRY_IS_AGED_OUT(entry) ||
 	       LIST_ENTRY_IS_OLDER_THAN_LATEST_AGED_OUT_FRAME(entry) ||
@@ -2208,7 +2379,6 @@ mgmt_rx_reo_scheduler_debug_info_enabled
  * frame getting scheduled by mgmt rx reo scheduler
  * @reo_ctx: management rx reorder context
  * @entry: Pointer to reorder list entry
- * @context: Current execution context
  * @reschedule: Indicates rescheduling
  *
  * Return: QDF_STATUS of operation
@@ -2216,7 +2386,6 @@ mgmt_rx_reo_scheduler_debug_info_enabled
 static QDF_STATUS
 mgmt_rx_reo_log_scheduler_debug_info(struct mgmt_rx_reo_context *reo_ctx,
 				     struct mgmt_rx_reo_list_entry *entry,
-				     enum mgmt_rx_reo_execution_context context,
 				     bool reschedule)
 {
 	struct reo_scheduler_debug_info *scheduler_debug_info;
@@ -2231,9 +2400,9 @@ mgmt_rx_reo_log_scheduler_debug_info(struct mgmt_rx_reo_context *reo_ctx,
 
 	stats = &scheduler_debug_info->stats;
 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
-	stats->scheduled_count[link_id]++;
+	stats->scheduled_count[link_id][entry->ctx_info.context]++;
 	if (reschedule)
-		stats->rescheduled_count[link_id]++;
+		stats->rescheduled_count[link_id][entry->ctx_info.context]++;
 
 	if (!mgmt_rx_reo_scheduler_debug_info_enabled(scheduler_debug_info))
 		return QDF_STATUS_SUCCESS;
@@ -2263,11 +2432,12 @@ mgmt_rx_reo_log_scheduler_debug_info(struct mgmt_rx_reo_context *reo_ctx,
 	cur_frame_debug_info->egress_list_insertion_ts =
 					entry->egress_list_insertion_ts;
 	cur_frame_debug_info->scheduled_ts = qdf_get_log_timestamp();
-	cur_frame_debug_info->release_reason = entry->release_reason;
-	cur_frame_debug_info->is_premature_delivery =
-						entry->is_premature_delivery;
+	cur_frame_debug_info->first_scheduled_ts = entry->first_scheduled_ts;
+	cur_frame_debug_info->last_scheduled_ts = entry->last_scheduled_ts;
+	cur_frame_debug_info->scheduled_count =
+				qdf_atomic_read(&entry->scheduled_count);
 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
-	cur_frame_debug_info->context = context;
+	cur_frame_debug_info->ctx_info = entry->ctx_info;
 
 	scheduler_debug_info->next_index++;
 	scheduler_debug_info->next_index %=
@@ -2313,10 +2483,28 @@ mgmt_rx_reo_defer_delivery(struct mgmt_rx_reo_list_entry *entry,
 			   uint32_t link_bitmap)
 {
 	uint8_t link_id;
+	uint8_t mlo_grp_id;
+	struct wlan_objmgr_pdev *pdev;
 
 	qdf_assert_always(entry);
 
 	link_id = mgmt_rx_reo_get_link_id(entry->rx_params);
+	mlo_grp_id = entry->rx_params->reo_params->mlo_grp_id;
+
+	pdev = wlan_get_pdev_from_mlo_link_id(link_id, mlo_grp_id,
+					      WLAN_MGMT_RX_REO_ID);
+	if (!pdev) {
+		mgmt_rx_reo_err("pdev for link %u, group %u is null",
+				link_id, mlo_grp_id);
+		return false;
+	}
+
+	if (!wlan_mgmt_rx_reo_is_scheduler_enabled_at_pdev(pdev)) {
+		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
+		return false;
+	}
+
+	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
 
 	return !(link_bitmap & (1 << link_id));
 }
@@ -2327,14 +2515,12 @@ mgmt_rx_reo_defer_delivery(struct mgmt_rx_reo_list_entry *entry,
  * @reo_context: Pointer to reorder context
  * @entry: List entry corresponding to the frame which has to be scheduled
  * for delivery
- * @context: Current execution context
  *
  * Return: QDF_STATUS
  */
 QDF_STATUS
 mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context *reo_context,
-			      struct mgmt_rx_reo_list_entry *entry,
-			      enum mgmt_rx_reo_execution_context context)
+			      struct mgmt_rx_reo_list_entry *entry)
 {
 	int scheduled_count;
 	int8_t link_id;
@@ -2353,17 +2539,12 @@ mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context *reo_context,
 		return QDF_STATUS_E_NULL_VALUE;
 	}
 
-	if (context >= MGMT_RX_REO_CONTEXT_MAX) {
-		mgmt_rx_reo_err("Invalid execution context %d", context);
-		return QDF_STATUS_E_NULL_VALUE;
-	}
-
 	scheduled_count = qdf_atomic_inc_return(&entry->scheduled_count);
 	qdf_assert_always(scheduled_count > 0);
 
 	reschedule = (scheduled_count > 1);
 	status = mgmt_rx_reo_log_scheduler_debug_info(reo_context, entry,
-						      context, reschedule);
+						      reschedule);
 	if (QDF_IS_STATUS_ERROR(status)) {
 		mgmt_rx_reo_err("Failed to log scheduler debug info");
 		return status;
@@ -2390,8 +2571,10 @@ mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context *reo_context,
 	if (QDF_IS_STATUS_ERROR(status)) {
 		mgmt_rx_reo_err("Failed to schedule for link %u, group %u",
 				link_id, mlo_grp_id);
+		wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
 		return status;
 	}
+	wlan_objmgr_pdev_release_ref(pdev, WLAN_MGMT_RX_REO_ID);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -2400,6 +2583,9 @@ mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context *reo_context,
  * mgmt_rx_reo_release_egress_list_entries() - Release entries from the
  * egress list
  * @reo_context: Pointer to management Rx reorder context
+ * @link_bitmap: Bitmap of links for which frames can be released in the current
+ * context
+ * @ctx: Current execution context info
  *
  * This API releases the entries from the egress list based on the following
  * conditions.
@@ -2413,7 +2599,9 @@ mgmt_rx_reo_schedule_delivery(struct mgmt_rx_reo_context *reo_context,
  * Return: QDF_STATUS
  */
 static QDF_STATUS
-mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context)
+mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context,
+					uint32_t link_bitmap,
+					struct mgmt_rx_reo_context_info *ctx)
 {
 	QDF_STATUS status;
 	struct mgmt_rx_reo_egress_list *egress_list;
@@ -2440,7 +2628,9 @@ mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context)
 		struct mgmt_rx_reo_frame_info *last_released_frame =
 					&reo_egress_list->last_released_frame;
 		uint32_t last_released_frame_ts;
-		bool deliver;
+		bool ready;
+		bool defer;
+		bool overflow;
 
 		qdf_spin_lock_bh(&reo_egress_list->list_lock);
 
@@ -2452,11 +2642,23 @@ mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context)
 			goto exit_unlock_egress_list_lock;
 		}
 
-		deliver = mgmt_rx_reo_is_entry_ready_to_send_up(first_entry);
-		qdf_assert_always(deliver);
+		ready = mgmt_rx_reo_is_entry_ready_to_send_up(first_entry);
+		qdf_assert_always(ready);
 
-		rx_params = first_entry->rx_params;
+		first_entry->ctx_info = *ctx;
+		defer = mgmt_rx_reo_defer_delivery(first_entry, link_bitmap);
+		overflow =
+		 LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(first_entry);
+		if (defer && !overflow) {
+			status = mgmt_rx_reo_schedule_delivery(reo_context,
+							       first_entry);
+			if (QDF_IS_STATUS_ERROR(status))
+				mgmt_rx_reo_err("Failed to schedule delivery");
+			goto exit_unlock_egress_list_lock;
+		}
 
+		first_entry->egress_list_size =
+					qdf_list_size(&reo_egress_list->list);
 		status = qdf_list_remove_node(&reo_egress_list->list,
 					      &first_entry->node);
 		if (QDF_IS_STATUS_ERROR(status)) {
@@ -2476,6 +2678,7 @@ mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context)
 		 * global time stamp, deliver the current frame to upper layer
 		 * and update the last released frame global time stamp.
 		 */
+		rx_params = first_entry->rx_params;
 		first_entry_ts = mgmt_rx_reo_get_global_ts(rx_params);
 		last_released_frame_ts =
 			last_released_frame->reo_params.global_timestamp;
@@ -2503,7 +2706,8 @@ mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context)
 		qdf_spin_unlock_bh(&reo_egress_list->list_lock);
 
 		status = mgmt_rx_reo_list_entry_send_up(reo_context,
-							first_entry);
+							first_entry,
+							!defer || !overflow);
 		if (QDF_IS_STATUS_ERROR(status)) {
 			status = QDF_STATUS_E_FAILURE;
 			qdf_mem_free(first_entry);
@@ -2518,6 +2722,8 @@ mgmt_rx_reo_release_egress_list_entries(struct mgmt_rx_reo_context *reo_context)
 	goto exit_unlock_frame_release_lock;
 
 exit_unlock_egress_list_lock:
+	qdf_assert_always(qdf_list_size(&reo_egress_list->list) <=
+					reo_egress_list->max_list_size);
 	qdf_spin_unlock_bh(&reo_egress_list->list_lock);
 exit_unlock_frame_release_lock:
 	qdf_spin_unlock(&reo_context->frame_release_lock);
@@ -2529,6 +2735,9 @@ QDF_STATUS
 mgmt_rx_reo_release_frames(uint8_t mlo_grp_id, uint32_t link_bitmap)
 {
 	struct mgmt_rx_reo_context *reo_context;
+	QDF_STATUS ret;
+	struct mgmt_rx_reo_context_info ctx_info = {0};
+	uint8_t link;
 
 	reo_context = mgmt_rx_reo_get_context(mlo_grp_id);
 	if (!reo_context) {
@@ -2536,6 +2745,24 @@ mgmt_rx_reo_release_frames(uint8_t mlo_grp_id, uint32_t link_bitmap)
 		return QDF_STATUS_E_NULL_VALUE;
 	}
 
+	for (link = 0; link < MAX_MLO_LINKS; link++)
+		if (link_bitmap & (1 << link)) {
+			struct reo_scheduler_stats *stats;
+
+			stats = &reo_context->scheduler_debug_info.stats;
+			stats->scheduler_cb_count[link]++;
+		}
+
+	ctx_info.context = MGMT_RX_REO_CONTEXT_SCHEDULER_CB;
+	ctx_info.context_id = qdf_atomic_inc_return(&reo_context->context_id);
+	ret = mgmt_rx_reo_release_egress_list_entries(reo_context, link_bitmap,
+						      &ctx_info);
+	if (QDF_IS_STATUS_ERROR(ret)) {
+		mgmt_rx_reo_err("Failure to release frames grp = %u bm = 0x%x",
+				mlo_grp_id, link_bitmap);
+		return ret;
+	}
+
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -2655,6 +2882,48 @@ mgmt_rx_reo_check_sanity_lists(struct mgmt_rx_reo_list *reo_egress_list,
 	return QDF_STATUS_SUCCESS;
 }
 
+/**
+ * mgmt_rx_reo_handle_egress_overflow() - Handle overflow of management
+ * rx reorder egress list
+ * @reo_egress_list: Pointer to egress reorder list
+ *
+ * API to handle overflow of management rx reorder egress list.
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS
+mgmt_rx_reo_handle_egress_overflow(struct mgmt_rx_reo_list *reo_egress_list)
+{
+	struct mgmt_rx_reo_list_entry *cur_entry;
+	uint32_t egress_list_max_size;
+	uint32_t egress_list_cur_size;
+	uint32_t num_overflow_frames;
+
+	if (!reo_egress_list) {
+		mgmt_rx_reo_err("Egress reorder list is null");
+		return QDF_STATUS_E_NULL_VALUE;
+	}
+
+	reo_egress_list->overflow_count++;
+	reo_egress_list->last_overflow_ts = qdf_get_log_timestamp();
+	mgmt_rx_reo_err_rl("Egress overflow, cnt:%llu size:%u",
+			   reo_egress_list->overflow_count,
+			   qdf_list_size(&reo_egress_list->list));
+
+	egress_list_cur_size = qdf_list_size(&reo_egress_list->list);
+	egress_list_max_size = reo_egress_list->max_list_size;
+	num_overflow_frames = egress_list_cur_size - egress_list_max_size;
+
+	qdf_list_for_each(&reo_egress_list->list, cur_entry, node) {
+		if (num_overflow_frames > 0) {
+			cur_entry->status |= STATUS_EGRESS_LIST_OVERFLOW;
+			num_overflow_frames--;
+		}
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
 /**
  * mgmt_rx_reo_move_entries_ingress_to_egress_list() - Moves frames in
  * the ingress list which are ready to be delivered to the egress list
@@ -2740,10 +3009,17 @@ mgmt_rx_reo_move_entries_ingress_to_egress_list
 				       &temp_list_frames_ready_to_deliver);
 		qdf_assert_always(QDF_IS_STATUS_SUCCESS(status));
 
+		if (mgmt_rx_reo_list_overflowed(reo_egress_list)) {
+			status =
+			    mgmt_rx_reo_handle_egress_overflow(reo_egress_list);
+			if (QDF_IS_STATUS_ERROR(status)) {
+				mgmt_rx_reo_err("Failed to handle overflow");
+				qdf_assert_always(0);
+			}
+		}
+
 		qdf_assert_always(qdf_list_size(&reo_ingress_list->list) <=
 				  reo_ingress_list->max_list_size);
-		qdf_assert_always(qdf_list_size(&reo_egress_list->list) <=
-						reo_egress_list->max_list_size);
 
 		status = mgmt_rx_reo_check_sanity_lists(reo_egress_list,
 							reo_ingress_list);
@@ -2838,6 +3114,7 @@ mgmt_rx_reo_ingress_list_ageout_timer_handler(void *arg)
 	 * list which has the largest global time stamp value.
 	 */
 	struct mgmt_rx_reo_list_entry *latest_aged_out_entry = NULL;
+	struct mgmt_rx_reo_context_info ctx_info = {0};
 
 	qdf_assert_always(ingress_list);
 	reo_ctx = mgmt_rx_reo_get_context_from_ingress_list(ingress_list);
@@ -2865,7 +3142,9 @@ mgmt_rx_reo_ingress_list_ageout_timer_handler(void *arg)
 		return;
 	}
 
-	ret = mgmt_rx_reo_release_egress_list_entries(reo_ctx);
+	ctx_info.context = MGMT_RX_REO_CONTEXT_INGRESS_LIST_TIMEOUT;
+	ctx_info.context_id = qdf_atomic_inc_return(&reo_ctx->context_id);
+	ret = mgmt_rx_reo_release_egress_list_entries(reo_ctx, 0, &ctx_info);
 	if (QDF_IS_STATUS_ERROR(ret)) {
 		mgmt_rx_reo_err("Failure to release entries, ret = %d", ret);
 		return;
@@ -3159,6 +3438,8 @@ mgmt_rx_reo_update_ingress_list(struct mgmt_rx_reo_ingress_list *ingress_list,
 			qdf_list_t *ingress_list_ptr = &reo_ingress_list->list;
 
 			reo_ingress_list->overflow_count++;
+			reo_ingress_list->last_overflow_ts =
+							qdf_get_log_timestamp();
 			mgmt_rx_reo_err_rl("Ingress overflow, cnt:%llu size:%u",
 					   reo_ingress_list->overflow_count,
 					   qdf_list_size(ingress_list_ptr));
@@ -3336,6 +3617,14 @@ mgmt_rx_reo_update_egress_list(struct mgmt_rx_reo_egress_list *egress_list,
 	if (QDF_IS_STATUS_ERROR(ret))
 		return ret;
 
+	if (mgmt_rx_reo_list_overflowed(reo_egress_list)) {
+		ret = mgmt_rx_reo_handle_egress_overflow(reo_egress_list);
+		if (QDF_IS_STATUS_ERROR(ret)) {
+			mgmt_rx_reo_err("Failed to handle egress overflow");
+			qdf_assert_always(0);
+		}
+	}
+
 	*is_queued = true;
 	frame_desc->queued_list = MGMT_RX_REO_LIST_TYPE_EGRESS;
 
@@ -3697,9 +3986,13 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 	uint64_t stale_count_per_link[MAX_MLO_LINKS] = {0};
 	uint64_t stale_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
 	uint64_t total_stale_count = 0;
+	uint64_t parallel_rx_count_per_link[MAX_MLO_LINKS] = {0};
+	uint64_t parallel_rx_per_desc[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
+	uint64_t total_parallel_rx_count = 0;
 	uint64_t error_count_per_link[MAX_MLO_LINKS] = {0};
 	uint64_t error_count_per_desc_type[MGMT_RX_REO_FRAME_DESC_TYPE_MAX] = {0};
 	uint64_t total_error_count = 0;
+	uint64_t total_missing_count = 0;
 	uint64_t total_queued = 0;
 	uint64_t queued_per_list[MGMT_RX_REO_LIST_TYPE_MAX] = {0};
 	uint64_t queued_per_link[MAX_MLO_LINKS] = {0};
@@ -3726,12 +4019,16 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 					stats->stale_count[link_id][desc_type];
 			error_count_per_link[link_id] +=
 					stats->error_count[link_id][desc_type];
+			parallel_rx_count_per_link[link_id] +=
+				   stats->parallel_rx_count[link_id][desc_type];
 		}
 
 		total_ingress_count += ingress_count_per_link[link_id];
 		total_reo_count += reo_count_per_link[link_id];
 		total_stale_count += stale_count_per_link[link_id];
 		total_error_count += error_count_per_link[link_id];
+		total_parallel_rx_count += parallel_rx_count_per_link[link_id];
+		total_missing_count += stats->missing_count[link_id];
 	}
 
 	for (desc_type = 0; desc_type < MGMT_RX_REO_FRAME_DESC_TYPE_MAX;
@@ -3745,6 +4042,8 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 					stats->stale_count[link_id][desc_type];
 			error_count_per_desc_type[desc_type] +=
 					stats->error_count[link_id][desc_type];
+			parallel_rx_per_desc[desc_type] +=
+				stats->parallel_rx_count[link_id][desc_type];
 		}
 	}
 
@@ -3842,7 +4141,24 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 			  stale_count_per_desc_type[2],
 			  total_stale_count);
 
-	mgmt_rx_reo_alert("\t4) Error Frame Count:");
+	mgmt_rx_reo_alert("\t4) Parallel rx Frame Count:");
+	mgmt_rx_reo_alert("\t------------------------------------");
+	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
+	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
+	mgmt_rx_reo_alert("\t-------------------------------------------");
+	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
+		mgmt_rx_reo_alert("\t|%10u|%7llu|%7llu|%7llu|%7llu", link_id,
+				  stats->parallel_rx_count[link_id][0],
+				  stats->parallel_rx_count[link_id][1],
+				  stats->parallel_rx_count[link_id][2],
+				  parallel_rx_count_per_link[link_id]);
+		mgmt_rx_reo_alert("\t-------------------------------------------");
+	}
+	mgmt_rx_reo_alert("\t           |%7llu|%7llu|%7llu|%7llu\n\n",
+			  parallel_rx_per_desc[0], parallel_rx_per_desc[1],
+			  parallel_rx_per_desc[2], total_parallel_rx_count);
+
+	mgmt_rx_reo_alert("\t5) Error Frame Count:");
 	mgmt_rx_reo_alert("\t------------------------------------");
 	mgmt_rx_reo_alert("\t|link id/  |       |       |       |");
 	mgmt_rx_reo_alert("\t|desc type |      0|      1|      2|");
@@ -3861,7 +4177,19 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 			  error_count_per_desc_type[2],
 			  total_error_count);
 
-	mgmt_rx_reo_alert("\t5) Host consumed frames related stats:");
+	mgmt_rx_reo_alert("\t6) Per link stats:");
+	mgmt_rx_reo_alert("\t----------------------------");
+	mgmt_rx_reo_alert("\t|link id   | Missing frame |");
+	mgmt_rx_reo_alert("\t|          |     count     |");
+	mgmt_rx_reo_alert("\t----------------------------");
+	for (link_id = 0; link_id < MAX_MLO_LINKS; link_id++) {
+		mgmt_rx_reo_alert("\t|%10u|%15llu|", link_id,
+				  stats->missing_count[link_id]);
+		mgmt_rx_reo_alert("\t----------------------------");
+	}
+	mgmt_rx_reo_alert("\t%11s|%15llu|\n\n", "", total_missing_count);
+
+	mgmt_rx_reo_alert("\t7) Host consumed frames related stats:");
 	mgmt_rx_reo_alert("\tOverall:");
 	mgmt_rx_reo_alert("\t------------------------------------------------");
 	mgmt_rx_reo_alert("\t|link id   |Queued frame |Zero wait |Immediate |");
@@ -3913,6 +4241,10 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
 			  zero_wait_count_rx_per_list[1],
 			  immediate_delivery_per_list[1]);
 
+	mgmt_rx_reo_alert("\t8) Misc stats:");
+	mgmt_rx_reo_alert("\t\tIngress list overflow count = %llu\n\n",
+			  reo_ctx->ingress_list.reo_list.overflow_count);
+
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -3923,13 +4255,15 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
  * @desc: Pointer to frame descriptor
  * @is_queued: Indicates whether this frame is queued to reorder list
  * @is_error: Indicates whether any error occurred during processing this frame
+ * @context_id: context identifier
  *
  * Return: QDF_STATUS of operation
  */
 static QDF_STATUS
 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
 			      struct mgmt_rx_reo_frame_descriptor *desc,
-			      bool is_queued, bool is_error)
+			      bool is_queued, bool is_error,
+			      int32_t context_id)
 {
 	struct reo_ingress_debug_info *ingress_frame_debug_info;
 	struct reo_ingress_debug_frame_info *cur_frame_debug_info;
@@ -3958,6 +4292,10 @@ mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
 		stats->error_count[link_id][desc->type]++;
 	if (desc->is_stale)
 		stats->stale_count[link_id][desc->type]++;
+	if (desc->pkt_ctr_delta > 1)
+		stats->missing_count[link_id] += desc->pkt_ctr_delta - 1;
+	if (desc->is_parallel_rx)
+		stats->parallel_rx_count[link_id][desc->type]++;
 
 	if (!mgmt_rx_reo_ingress_frame_debug_info_enabled
 						(ingress_frame_debug_info))
@@ -4009,6 +4347,7 @@ mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
 					desc->egress_list_insertion_pos;
 	cur_frame_debug_info->cpu_id = qdf_get_smp_processor_id();
 	cur_frame_debug_info->reo_required = desc->reo_required;
+	cur_frame_debug_info->context_id = context_id;
 
 	ingress_frame_debug_info->next_index++;
 	ingress_frame_debug_info->next_index %=
@@ -4249,13 +4588,15 @@ mgmt_rx_reo_debug_print_ingress_frame_stats(struct mgmt_rx_reo_context *reo_ctx)
  * @desc: Pointer to frame descriptor
  * @is_queued: Indicates whether this frame is queued to reorder list
  * @is_error: Indicates whether any error occurred during processing this frame
+ * @context_id: context identifier
  *
  * Return: QDF_STATUS of operation
  */
 static QDF_STATUS
 mgmt_rx_reo_log_ingress_frame(struct mgmt_rx_reo_context *reo_ctx,
 			      struct mgmt_rx_reo_frame_descriptor *desc,
-			      bool is_queued, bool is_error)
+			      bool is_queued, bool is_error,
+			      int32_t context_id)
 {
 	return QDF_STATUS_SUCCESS;
 }
@@ -4283,6 +4624,9 @@ wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
 	struct mgmt_rx_reo_ingress_list *ingress_list;
 	struct mgmt_rx_reo_egress_list *egress_list;
 	QDF_STATUS ret;
+	int16_t cur_link;
+	struct mgmt_rx_reo_context_info ctx_info = {0};
+	int32_t context_id = 0;
 
 	if (!is_queued) {
 		mgmt_rx_reo_err("Pointer to queued indication is null");
@@ -4393,6 +4737,7 @@ wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
 	 */
 	qdf_spin_lock(&reo_ctx->reo_algo_entry_lock);
 
+	cur_link = mgmt_rx_reo_get_link_id(desc->rx_params);
 	qdf_assert_always(desc->rx_params->reo_params->valid);
 	qdf_assert_always(desc->frame_type == IEEE80211_FC0_TYPE_MGT);
 
@@ -4416,8 +4761,9 @@ wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
 	if (QDF_IS_STATUS_ERROR(ret))
 		goto failure;
 
-	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc,
-					    *is_queued, false);
+	context_id = qdf_atomic_inc_return(&reo_ctx->context_id);
+	ret = mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, false,
+					    context_id);
 	if (QDF_IS_STATUS_ERROR(ret)) {
 		qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
 		return ret;
@@ -4430,15 +4776,20 @@ wlan_mgmt_rx_reo_algo_entry(struct wlan_objmgr_pdev *pdev,
 	if (QDF_IS_STATUS_ERROR(ret))
 		return ret;
 
+	ctx_info.context = MGMT_RX_REO_CONTEXT_MGMT_RX;
+	ctx_info.in_reo_params = *desc->rx_params->reo_params;
+	ctx_info.context_id = context_id;
 	/* Finally, release the entries for which pending frame is received */
-	return mgmt_rx_reo_release_egress_list_entries(reo_ctx);
+	return mgmt_rx_reo_release_egress_list_entries(reo_ctx, 1 << cur_link,
+						       &ctx_info);
 
 failure:
 	/**
 	 * Ignore the return value of this function call, return
 	 * the actual reason for failure.
 	 */
-	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true);
+	mgmt_rx_reo_log_ingress_frame(reo_ctx, desc, *is_queued, true,
+				      context_id);
 
 	qdf_spin_unlock(&reo_ctx->reo_algo_entry_lock);
 
@@ -6562,6 +6913,7 @@ mgmt_rx_reo_init_context(uint8_t ml_grp_id)
 
 	qdf_spinlock_create(&reo_context->reo_algo_entry_lock);
 	qdf_spinlock_create(&reo_context->frame_release_lock);
+	qdf_atomic_init(&reo_context->context_id);
 
 	return QDF_STATUS_SUCCESS;
 

+ 72 - 13
umac/cmn_services/mgmt_txrx/core/src/wlan_mgmt_txrx_rx_reo_i.h

@@ -52,6 +52,7 @@
 #define STATUS_OLDER_THAN_LATEST_AGED_OUT_FRAME      (BIT(2))
 #define STATUS_INGRESS_LIST_OVERFLOW                 (BIT(3))
 #define STATUS_OLDER_THAN_READY_TO_DELIVER_FRAMES    (BIT(4))
+#define STATUS_EGRESS_LIST_OVERFLOW                  (BIT(5))
 
 #define MGMT_RX_REO_INVALID_LINK   (-1)
 
@@ -61,8 +62,9 @@
 #define RELEASE_REASON_OLDER_THAN_AGED_OUT_FRAME                (BIT(2))
 #define RELEASE_REASON_INGRESS_LIST_OVERFLOW                    (BIT(3))
 #define RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES       (BIT(4))
+#define RELEASE_REASON_EGRESS_LIST_OVERFLOW                     (BIT(5))
 #define RELEASE_REASON_MAX  \
-		(RELEASE_REASON_OLDER_THAN_READY_TO_DELIVER_FRAMES << 1)
+		(RELEASE_REASON_EGRESS_LIST_OVERFLOW << 1)
 
 #define LIST_ENTRY_IS_WAITING_FOR_FRAME_ON_OTHER_LINK(entry)   \
 	((entry)->status & STATUS_WAIT_FOR_FRAME_ON_OTHER_LINKS)
@@ -74,6 +76,8 @@
 	((entry)->status & STATUS_INGRESS_LIST_OVERFLOW)
 #define LIST_ENTRY_IS_OLDER_THAN_READY_TO_DELIVER_FRAMES(entry)  \
 	((entry)->status & STATUS_OLDER_THAN_READY_TO_DELIVER_FRAMES)
+#define LIST_ENTRY_IS_REMOVED_DUE_TO_EGRESS_LIST_OVERFLOW(entry)  \
+	((entry)->status & STATUS_EGRESS_LIST_OVERFLOW)
 
 #ifdef WLAN_MGMT_RX_REO_DEBUG_SUPPORT
 #define MGMT_RX_REO_EGRESS_FRAME_DEBUG_INFO_BOARDER_MAX_SIZE   (848)
@@ -260,6 +264,19 @@ enum mgmt_rx_reo_execution_context {
 	MGMT_RX_REO_CONTEXT_INVALID,
 };
 
+/**
+ * struct mgmt_rx_reo_context_info - This structure holds the information
+ * about the current execution context
+ * @context: Current execution context
+ * @in_reo_params: Reo parameters of the current management frame
+ * @context_id: Context identifier
+ */
+struct mgmt_rx_reo_context_info {
+	enum mgmt_rx_reo_execution_context context;
+	struct mgmt_rx_reo_params in_reo_params;
+	int32_t context_id;
+};
+
 /**
  * struct mgmt_rx_reo_frame_info - This structure holds the information
  * about a management frame.
@@ -280,6 +297,7 @@ struct mgmt_rx_reo_frame_info {
  * @list_lock: Spin lock to protect the list
  * @max_list_size: Maximum size of the reorder list
  * @overflow_count: Number of times list overflow occurred
+ * @last_overflow_ts: Host time stamp of last overflow
  * @last_inserted_frame: Information about the last frame inserted to the list
  * @last_released_frame: Information about the last frame released from the list
  */
@@ -288,6 +306,7 @@ struct mgmt_rx_reo_list {
 	qdf_spinlock_t list_lock;
 	uint32_t max_list_size;
 	uint64_t overflow_count;
+	uint64_t last_overflow_ts;
 	struct mgmt_rx_reo_frame_info last_inserted_frame;
 	struct mgmt_rx_reo_frame_info last_released_frame;
 };
@@ -352,10 +371,12 @@ struct mgmt_rx_reo_wait_count {
  * by scheduler
  * @egress_timestamp: Host time stamp when this frame has exited reorder
  * module
+ * @egress_list_size: Egress list size just before removing this frame
  * @status: Status for this entry
  * @pdev: Pointer to pdev object corresponding to this frame
  * @release_reason: Release reason
  * @is_delivered: Indicates whether the frame is delivered successfully
 * @is_dropped: Indicates whether the frame is dropped in reo layer
  * @is_premature_delivery: Indicates whether the frame is delivered
  * prematurely
  * @is_parallel_rx: Indicates that this frame is received in parallel to the
@@ -363,6 +384,7 @@ struct mgmt_rx_reo_wait_count {
  * @shared_snapshots: snapshots shared b/w host and target
  * @host_snapshot: host snapshot
  * @scheduled_count: Number of times scheduler is invoked for this frame
+ * @ctx_info: Execution context info
  */
 struct mgmt_rx_reo_list_entry {
 	qdf_list_node_t node;
@@ -378,16 +400,19 @@ struct mgmt_rx_reo_list_entry {
 	uint64_t first_scheduled_ts;
 	uint64_t last_scheduled_ts;
 	uint64_t egress_timestamp;
+	uint64_t egress_list_size;
 	uint32_t status;
 	struct wlan_objmgr_pdev *pdev;
 	uint8_t release_reason;
 	bool is_delivered;
+	bool is_dropped;
 	bool is_premature_delivery;
 	bool is_parallel_rx;
 	struct mgmt_rx_reo_snapshot_params shared_snapshots
 			[MAX_MLO_LINKS][MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
 	struct mgmt_rx_reo_snapshot_params host_snapshot[MAX_MLO_LINKS];
 	qdf_atomic_t scheduled_count;
+	struct mgmt_rx_reo_context_info ctx_info;
 };
 
 #ifdef WLAN_MGMT_RX_REO_SIM_SUPPORT
@@ -595,6 +620,7 @@ struct mgmt_rx_reo_sim_context {
  * @reo_required: Indicates whether reorder is required for the current frame.
  * If reorder is not required, current frame will just be used for updating the
  * wait count of frames already part of the reorder list.
+ * @context_id: Context identifier
  */
 struct reo_ingress_debug_frame_info {
 	uint8_t link_id;
@@ -626,12 +652,14 @@ struct reo_ingress_debug_frame_info {
 	struct mgmt_rx_reo_snapshot_params host_snapshot[MAX_MLO_LINKS];
 	int cpu_id;
 	bool reo_required;
+	int32_t context_id;
 };
 
 /**
  * struct reo_egress_debug_frame_info - Debug information about a frame
  * leaving the reorder module
  * @is_delivered: Indicates whether the frame is delivered to upper layers
+ * @is_dropped: Indicates whether the frame is dropped in reo layer
  * @is_premature_delivery: Indicates whether the frame is delivered
  * prematurely
  * @link_id: link id
@@ -650,15 +678,23 @@ struct reo_ingress_debug_frame_info {
  * layer
  * @egress_duration: Duration in us taken by the upper layer to process
  * the frame.
+ * @egress_list_size: Egress list size just before removing this frame
+ * @first_scheduled_ts: Host time stamp when this entry is first scheduled for
+ * delivery
+ * @last_scheduled_ts: Host time stamp when this entry is last scheduled for
+ * delivery
+ * @scheduled_count: Number of times this entry is scheduled
  * @initial_wait_count: Wait count when the frame is queued
  * @final_wait_count: Wait count when frame is released to upper layer
  * @release_reason: Reason for delivering the frame to upper layers
  * @shared_snapshots: snapshots shared b/w host and target
  * @host_snapshot: host snapshot
  * @cpu_id: CPU index
+ * @ctx_info: Execution context info
  */
 struct reo_egress_debug_frame_info {
 	bool is_delivered;
+	bool is_dropped;
 	bool is_premature_delivery;
 	uint8_t link_id;
 	uint16_t mgmt_pkt_ctr;
@@ -670,6 +706,10 @@ struct reo_egress_debug_frame_info {
 	uint64_t egress_list_removal_ts;
 	uint64_t egress_timestamp;
 	uint64_t egress_duration;
+	uint64_t egress_list_size;
+	uint64_t first_scheduled_ts;
+	uint64_t last_scheduled_ts;
+	int32_t scheduled_count;
 	struct mgmt_rx_reo_wait_count initial_wait_count;
 	struct mgmt_rx_reo_wait_count final_wait_count;
 	uint8_t release_reason;
@@ -677,6 +717,7 @@ struct reo_egress_debug_frame_info {
 			[MAX_MLO_LINKS][MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
 	struct mgmt_rx_reo_snapshot_params host_snapshot[MAX_MLO_LINKS];
 	int cpu_id;
+	struct mgmt_rx_reo_context_info ctx_info;
 };
 
 /**
@@ -698,6 +739,9 @@ struct reo_egress_debug_frame_info {
  * last frame delivered to upper layer is a stale frame.
  * @error_count: Number of frames dropped due to error occurred
  * within the reorder module
+ * @parallel_rx_count: Number of frames which are categorised as parallel rx
+ * @missing_count: Number of frames missing. This is calculated based on the
+ * packet counter holes.
  */
 struct reo_ingress_frame_stats {
 	uint64_t ingress_count
@@ -713,6 +757,9 @@ struct reo_ingress_frame_stats {
 			    [MGMT_RX_REO_FRAME_DESC_TYPE_MAX];
 	uint64_t error_count[MAX_MLO_LINKS]
 			    [MGMT_RX_REO_FRAME_DESC_TYPE_MAX];
+	uint64_t parallel_rx_count[MAX_MLO_LINKS]
+			    [MGMT_RX_REO_FRAME_DESC_TYPE_MAX];
+	uint64_t missing_count[MAX_MLO_LINKS];
 };
 
 /**
@@ -722,17 +769,22 @@ struct reo_ingress_frame_stats {
  * frames to upper layers
  * @delivery_success_count: Number of successful management frame
  * deliveries to upper layer
+ * @drop_count: Number of management frames dropped within reo layer
  * @premature_delivery_count:  Number of frames delivered
  * prematurely. Premature delivery is the delivery of a management frame
  * to the upper layers even before its wait count is reaching zero.
- * @delivery_count: Number frames delivered successfully for
- * each link and release  reason.
+ * @delivery_reason_count: Number of frames delivered successfully for
+ * each link and release reason.
+ * @delivery_context_count: Number of frames delivered successfully for
+ * each link and execution context.
  */
 struct reo_egress_frame_stats {
 	uint64_t delivery_attempts_count[MAX_MLO_LINKS];
 	uint64_t delivery_success_count[MAX_MLO_LINKS];
+	uint64_t drop_count[MAX_MLO_LINKS];
 	uint64_t premature_delivery_count[MAX_MLO_LINKS];
-	uint64_t delivery_count[MAX_MLO_LINKS][RELEASE_REASON_MAX];
+	uint64_t delivery_reason_count[MAX_MLO_LINKS][RELEASE_REASON_MAX];
+	uint64_t delivery_context_count[MAX_MLO_LINKS][MGMT_RX_REO_CONTEXT_MAX];
 };
 
 /**
@@ -778,8 +830,6 @@ struct reo_egress_debug_info {
 /**
  * struct reo_scheduler_debug_frame_info - Debug information about a frame
 * getting scheduled by management Rx reo scheduler
- * @is_premature_delivery: Indicates whether the frame is delivered
- * prematurely
  * @link_id: link id
  * @mgmt_pkt_ctr: management packet counter
  * @global_timestamp: MLO global time stamp
@@ -791,16 +841,19 @@ struct reo_egress_debug_info {
  * @egress_list_insertion_ts: Host time stamp when this entry is inserted to
  * the egress list.
  * @scheduled_ts: Host time stamp when this entry is scheduled for delivery
+ * @first_scheduled_ts: Host time stamp when this entry is first scheduled for
+ * delivery
+ * @last_scheduled_ts: Host time stamp when this entry is last scheduled for
+ * delivery
+ * @scheduled_count: Number of times this entry is scheduled
  * @initial_wait_count: Wait count when the frame is queued
  * @final_wait_count: Wait count when frame is released to upper layer
- * @release_reason: Reason for delivering the frame to upper layers
  * @shared_snapshots: snapshots shared b/w host and target
  * @host_snapshot: host snapshot
  * @cpu_id: CPU index
- * @context: Execution context
+ * @ctx_info: Execution context info
  */
 struct reo_scheduler_debug_frame_info {
-	bool is_premature_delivery;
 	uint8_t link_id;
 	uint16_t mgmt_pkt_ctr;
 	uint32_t global_timestamp;
@@ -809,14 +862,16 @@ struct reo_scheduler_debug_frame_info {
 	uint64_t ingress_list_removal_ts;
 	uint64_t egress_list_insertion_ts;
 	uint64_t scheduled_ts;
+	uint64_t first_scheduled_ts;
+	uint64_t last_scheduled_ts;
+	int32_t scheduled_count;
 	struct mgmt_rx_reo_wait_count initial_wait_count;
 	struct mgmt_rx_reo_wait_count final_wait_count;
-	uint8_t release_reason;
 	struct mgmt_rx_reo_snapshot_params shared_snapshots
 			[MAX_MLO_LINKS][MGMT_RX_REO_SHARED_SNAPSHOT_MAX];
 	struct mgmt_rx_reo_snapshot_params host_snapshot[MAX_MLO_LINKS];
 	int cpu_id;
-	enum mgmt_rx_reo_execution_context context;
+	struct mgmt_rx_reo_context_info ctx_info;
 };
 
 /**
@@ -824,10 +879,12 @@ struct reo_scheduler_debug_frame_info {
  * frames scheduled by reo scheduler
  * @scheduled_count: Scheduled count
  * @rescheduled_count: Rescheduled count
+ * @scheduler_cb_count: Scheduler callback count
  */
 struct reo_scheduler_stats {
-	uint64_t scheduled_count[MAX_MLO_LINKS];
-	uint64_t rescheduled_count[MAX_MLO_LINKS];
+	uint64_t scheduled_count[MAX_MLO_LINKS][MGMT_RX_REO_CONTEXT_MAX];
+	uint64_t rescheduled_count[MAX_MLO_LINKS][MGMT_RX_REO_CONTEXT_MAX];
+	uint64_t scheduler_cb_count[MAX_MLO_LINKS];
 };
 
 /**
@@ -882,6 +939,7 @@ struct reo_scheduler_debug_info {
  * @simulation_in_progress: Flag to indicate whether simulation is
  * in progress
  * @mlo_grp_id: MLO Group ID which it belongs to
+ * @context_id: Context identifier
  */
 struct mgmt_rx_reo_context {
 	struct mgmt_rx_reo_ingress_list ingress_list;
@@ -901,6 +959,7 @@ struct mgmt_rx_reo_context {
 #endif /* WLAN_MGMT_RX_REO_DEBUG_SUPPORT */
 	bool simulation_in_progress;
 	uint8_t mlo_grp_id;
+	qdf_atomic_t context_id;
 };
 
 /**

+ 22 - 0
umac/cmn_services/mgmt_txrx/dispatcher/inc/cfg_mgmt_rx_reo.h

@@ -46,6 +46,27 @@
 	CFG_INI_BOOL("mgmt_rx_reo_enable", false, \
 			"Enable MGMT Rx REO feature")
 
+/*
+ * <ini>
+ * mgmt_rx_reo_scheduler_enable - Enable MGMT Rx REO scheduler
+ * @Min: 0
+ * @Max: 1
+ * @Default: 0
+ *
+ * This ini is used to enable MGMT Rx REO scheduler
+ *
+ * Related: None
+ *
+ * Supported Feature: MGMT Rx REO
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_MGMT_RX_REO_SCHEDULER_ENABLE \
+	CFG_INI_BOOL("mgmt_rx_reo_scheduler_enable", false, \
+			"Enable MGMT Rx REO scheduler")
+
 /*
  * <ini>
  * mgmt_rx_reo_pkt_ctr_delta_thresh - Packet counter delta threshold
@@ -149,6 +170,7 @@
 
 #define CFG_MGMT_RX_REO_ALL \
 	CFG(CFG_MGMT_RX_REO_ENABLE) \
+	CFG(CFG_MGMT_RX_REO_SCHEDULER_ENABLE) \
 	CFG(CFG_MGMT_RX_REO_PKT_CTR_DELTA_THRESH) \
 	CFG(CFG_MGMT_RX_REO_INGRESS_FRAME_DEBUG_LIST_SIZE) \
 	CFG(CFG_MGMT_RX_REO_EGRESS_FRAME_DEBUG_LIST_SIZE) \

+ 20 - 0
umac/cmn_services/mgmt_txrx/dispatcher/inc/wlan_mgmt_txrx_rx_reo_utils_api.h

@@ -352,6 +352,26 @@ wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(struct wlan_objmgr_psoc *psoc);
 bool
 wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(struct wlan_objmgr_pdev *pdev);
 
+/**
+ * wlan_mgmt_rx_reo_is_scheduler_enabled_at_psoc() - Check if MGMT Rx REO
+ * scheduler is enabled on a given psoc
+ * @psoc: pointer to psoc object
+ *
+ * Return: true if the scheduler is enabled, else false
+ */
+bool
+wlan_mgmt_rx_reo_is_scheduler_enabled_at_psoc(struct wlan_objmgr_psoc *psoc);
+
+/**
+ * wlan_mgmt_rx_reo_is_scheduler_enabled_at_pdev() - Check if MGMT Rx REO
+ * scheduler is enabled on a given pdev
+ * @pdev: pointer to pdev object
+ *
+ * Return: true if the scheduler is enabled, else false
+ */
+bool
+wlan_mgmt_rx_reo_is_scheduler_enabled_at_pdev(struct wlan_objmgr_pdev *pdev);
+
 /**
  * wlan_mgmt_rx_reo_get_pkt_ctr_delta_thresh() - Get the packet counter delta
  * threshold value

+ 1 - 1
umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_rx_reo_tgt_api.c

@@ -223,7 +223,7 @@ tgt_mgmt_rx_reo_release_frames(struct wlan_objmgr_psoc *psoc)
 				     psoc_get_hw_link_id_bmap,
 				     &link_bitmap, false, WLAN_MGMT_RX_REO_ID);
 
-	return QDF_STATUS_SUCCESS;
+	return wlan_mgmt_rx_reo_release_frames(mlo_grp_id, link_bitmap);
 }
 
 QDF_STATUS tgt_mgmt_rx_reo_filter_config(struct wlan_objmgr_pdev *pdev,

+ 29 - 0
umac/cmn_services/mgmt_txrx/dispatcher/src/wlan_mgmt_txrx_rx_reo_utils_api.c

@@ -376,6 +376,35 @@ wlan_mgmt_rx_reo_is_feature_enabled_at_pdev(struct wlan_objmgr_pdev *pdev)
 }
 
 qdf_export_symbol(wlan_mgmt_rx_reo_is_feature_enabled_at_pdev);
+
+bool
+wlan_mgmt_rx_reo_is_scheduler_enabled_at_psoc(struct wlan_objmgr_psoc *psoc)
+{
+	if (!psoc) {
+		mgmt_rx_reo_err("psoc is NULL!");
+		return false;
+	}
+
+	return cfg_get(psoc, CFG_MGMT_RX_REO_SCHEDULER_ENABLE);
+}
+
+qdf_export_symbol(wlan_mgmt_rx_reo_is_scheduler_enabled_at_psoc);
+
+bool
+wlan_mgmt_rx_reo_is_scheduler_enabled_at_pdev(struct wlan_objmgr_pdev *pdev)
+{
+	struct wlan_objmgr_psoc *psoc;
+
+	if (!pdev) {
+		mgmt_rx_reo_err("pdev is NULL!");
+		return false;
+	}
+
+	psoc = wlan_pdev_get_psoc(pdev);
+	return wlan_mgmt_rx_reo_is_scheduler_enabled_at_psoc(psoc);
+}
+
+qdf_export_symbol(wlan_mgmt_rx_reo_is_scheduler_enabled_at_pdev);
 #else
 bool
 wlan_mgmt_rx_reo_is_feature_enabled_at_psoc(struct wlan_objmgr_psoc *psoc)