Przeglądaj źródła

qcacld-3.0: Use spinlock and local variable store to avoid race condition

Access to TSO stats related parameters is not protected by a spinlock.
Due to this, a race condition occurs in concurrency scenarios where two
packets are processed simultaneously; a crash happens because we write
to out-of-bounds memory and then try to access it.

Use spinlock and local variable store to avoid race condition.

Change-Id: I37413ab7ccf6bd5171815af0038401925aa7831b
CRs-Fixed: 1114150
Himanshu Agarwal 8 lat temu
rodzic
commit
5501c19153

+ 2 - 1
core/dp/ol/inc/ol_txrx_stats.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -108,6 +108,7 @@ struct ol_txrx_stats_tx_tso {
 #if defined(FEATURE_TSO)
 	struct ol_txrx_stats_tso_info tso_info;
 	struct ol_txrx_tso_histogram tso_hist;
+	qdf_spinlock_t tso_stats_lock;
 #endif
 };
 

+ 79 - 26
core/dp/txrx/ol_tx.c

@@ -232,12 +232,64 @@ qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev, qdf_nbuf_t skb)
 }
 #endif
 
+#if defined(FEATURE_TSO)
+/**
+ * ol_tx_tso_update_stats() - update TSO stats
+ * @pdev: pointer to ol_txrx_pdev_t structure
+ * @tso_info: tso info for the msdu
+ * @msdu: tso mdsu for which stats are updated
+ * @tso_msdu_idx: stats index in the global TSO stats array where stats will be
+ *                updated
+ *
+ * Return: None
+ */
+static inline void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
+					struct qdf_tso_info_t  *tso_info,
+					qdf_nbuf_t msdu,
+					uint32_t tso_msdu_idx)
+{
+	TXRX_STATS_TSO_HISTOGRAM(pdev,  tso_info->num_segs);
+	TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, tso_msdu_idx,
+					qdf_nbuf_tcp_tso_size(msdu));
+	TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev,
+					tso_msdu_idx, qdf_nbuf_len(msdu));
+	TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, tso_msdu_idx,
+					qdf_nbuf_get_nr_frags(msdu));
+}
+
+/**
+ * ol_tx_tso_get_stats_idx() - retrieve global TSO stats index and increment it
+ * @pdev: pointer to ol_txrx_pdev_t structure
+ *
+ * Retrieve the current value of the global variable and increment it. This is
+ * done in a spinlock as the global TSO stats may be accessed in parallel by
+ * multiple TX streams.
+ *
+ * Return: The current value of TSO stats index.
+ */
+static uint32_t ol_tx_tso_get_stats_idx(struct ol_txrx_pdev_t *pdev)
+{
+	uint32_t msdu_stats_idx = 0;
+
+	qdf_spin_lock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
+	msdu_stats_idx = pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx;
+	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx++;
+	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx &=
+					NUM_MAX_TSO_MSDUS_MASK;
+	qdf_spin_unlock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
+
+	TXRX_STATS_TSO_RESET_MSDU(pdev, msdu_stats_idx);
+
+	return msdu_stats_idx;
+}
+#endif
 
 #if defined(FEATURE_TSO)
 qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 {
 	qdf_nbuf_t msdu = msdu_list;
 	struct ol_txrx_msdu_info_t msdu_info;
+	uint32_t tso_msdu_stats_idx = 0;
 
 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
 	msdu_info.htt.action.tx_comp_req = 0;
@@ -263,14 +315,15 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 		}
 
 		segments = msdu_info.tso_info.num_segs;
-		TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
-		TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
-					 qdf_nbuf_tcp_tso_size(msdu));
-		TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
-					 qdf_nbuf_len(msdu));
-		TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
-					 qdf_nbuf_get_nr_frags(msdu));
 
+		if (msdu_info.tso_info.is_tso) {
+			tso_msdu_stats_idx =
+					ol_tx_tso_get_stats_idx(vdev->pdev);
+			msdu_info.tso_info.msdu_stats_idx = tso_msdu_stats_idx;
+			ol_tx_tso_update_stats(vdev->pdev,
+						&(msdu_info.tso_info),
+						msdu, tso_msdu_stats_idx);
+		}
 
 		/*
 		 * The netbuf may get linked into a different list inside the
@@ -317,16 +370,14 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 			qdf_nbuf_reset_num_frags(msdu);
 
 			if (msdu_info.tso_info.is_tso) {
-				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
-				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
+				TXRX_STATS_TSO_INC_SEG(vdev->pdev,
+					tso_msdu_stats_idx);
+				TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev,
+					tso_msdu_stats_idx);
 			}
 		} /* while segments */
 
 		msdu = next;
-		if (msdu_info.tso_info.is_tso) {
-			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
-			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
-		}
 	} /* while msdus */
 	return NULL;            /* all MSDUs were accepted */
 }
@@ -459,6 +510,7 @@ ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
 		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
 			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
 		TXRX_STATS_TSO_SEG_UPDATE(pdev,
+			 msdu_info->tso_info.msdu_stats_idx,
 			 msdu_info->tso_info.curr_seg->seg);
 	} else {
 		for (i = 1; i < num_frags; i++) {
@@ -535,6 +587,7 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 		((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
 	uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
 	struct ol_txrx_msdu_info_t msdu_info;
+	uint32_t tso_msdu_stats_idx = 0;
 
 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
 	msdu_info.htt.action.tx_comp_req = 0;
@@ -560,13 +613,15 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 		}
 
 		segments = msdu_info.tso_info.num_segs;
-		TXRX_STATS_TSO_HISTOGRAM(vdev->pdev, segments);
-		TXRX_STATS_TSO_GSO_SIZE_UPDATE(vdev->pdev,
-				 qdf_nbuf_tcp_tso_size(msdu));
-		TXRX_STATS_TSO_TOTAL_LEN_UPDATE(vdev->pdev,
-				 qdf_nbuf_len(msdu));
-		TXRX_STATS_TSO_NUM_FRAGS_UPDATE(vdev->pdev,
-				 qdf_nbuf_get_nr_frags(msdu));
+
+		if (msdu_info.tso_info.is_tso) {
+			tso_msdu_stats_idx =
+					ol_tx_tso_get_stats_idx(vdev->pdev);
+			msdu_info.tso_info.msdu_stats_idx = tso_msdu_stats_idx;
+			ol_tx_tso_update_stats(vdev->pdev,
+						&(msdu_info.tso_info),
+						msdu, tso_msdu_stats_idx);
+		}
 
 		/*
 		 * The netbuf may get linked into a different list
@@ -653,8 +708,10 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 
 				if (msdu_info.tso_info.is_tso) {
 					qdf_nbuf_reset_num_frags(msdu);
-					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
-					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
+					TXRX_STATS_TSO_INC_SEG(vdev->pdev,
+						tso_msdu_stats_idx);
+					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev,
+						tso_msdu_stats_idx);
 				}
 			} else {
 				TXRX_STATS_MSDU_LIST_INCR(
@@ -665,10 +722,6 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 		} /* while segments */
 
 		msdu = next;
-		if (msdu_info.tso_info.is_tso) {
-			TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
-			TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
-		}
 	} /* while msdus */
 	return NULL; /* all MSDUs were accepted */
 }

+ 1 - 0
core/dp/txrx/ol_tx_desc.c

@@ -561,6 +561,7 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
 		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
 			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
 		TXRX_STATS_TSO_SEG_UPDATE(pdev,
+			 msdu_info->tso_info.msdu_stats_idx,
 			 msdu_info->tso_info.curr_seg->seg);
 	} else {
 		for (i = 1; i < num_frags; i++) {

+ 109 - 77
core/dp/txrx/ol_txrx.c

@@ -984,6 +984,111 @@ ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
 }
 #endif
 
+#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
+static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
+{
+	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
+}
+
+static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
+{
+	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
+}
+
+static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
+{
+	int msdu_idx;
+	int seg_idx;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"TSO Statistics:");
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"TSO pkts %lld, bytes %lld\n",
+		pdev->stats.pub.tx.tso.tso_pkts.pkts,
+		pdev->stats.pub.tx.tso.tso_pkts.bytes);
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"TSO Histogram for numbers of segments:\n"
+			"Single segment	%d\n"
+			"  2-5 segments	%d\n"
+			" 6-10 segments	%d\n"
+			"11-15 segments	%d\n"
+			"16-20 segments	%d\n"
+			"  20+ segments	%d\n",
+			pdev->stats.pub.tx.tso.tso_hist.pkts_1,
+			pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
+			pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
+			pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
+			pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
+			pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"TSO History Buffer: Total size %d, current_index %d",
+			NUM_MAX_TSO_MSDUS,
+			TXRX_STATS_TSO_MSDU_IDX(pdev));
+
+	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
+		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
+			continue;
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
+			msdu_idx,
+			TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
+			TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
+			TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
+			TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));
+
+		for (seg_idx = 0;
+			 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
+			   msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
+			 seg_idx++) {
+			struct qdf_tso_seg_t tso_seg =
+				 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
+
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				 "seg idx: %d", seg_idx);
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				 "tso_enable: %d",
+				 tso_seg.tso_flags.tso_enable);
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				 "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
+				 tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
+				 tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
+				 tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
+				 tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
+				 tso_seg.tso_flags.ns);
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				 "tcp_seq_num: 0x%x ip_id: %d",
+				 tso_seg.tso_flags.tcp_seq_num,
+				 tso_seg.tso_flags.ip_id);
+		}
+	}
+}
+#else
+static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
+{
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+	 "TSO is not supported\n");
+}
+
+static void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
+{
+	/*
+	 * keeping the body empty and not keeping an error print, as the print
+	 * will show up every time during driver load if TSO is not enabled.
+	 */
+}
+
+static void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
+{
+	/*
+	 * keeping the body empty and not keeping an error print, as the print
+	 * will show up every time during driver unload if TSO is not enabled.
+	 */
+}
+
+#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */
+
 /**
  * ol_txrx_pdev_attach() - allocate txrx pdev
  * @ctrl_pdev: cfg pdev
@@ -1017,6 +1122,7 @@ ol_txrx_pdev_attach(ol_txrx_soc_handle soc, void *pctrl_pdev,
 		pdev->sec_types[i] = (enum ol_sec_type)i;
 
 	TXRX_STATS_INIT(pdev);
+	ol_txrx_tso_stats_init(pdev);
 
 	TAILQ_INIT(&pdev->vdev_list);
 
@@ -1054,6 +1160,7 @@ fail2:
 		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);
 
 fail1:
+	ol_txrx_tso_stats_deinit(pdev);
 	qdf_mem_free(pdev);
 
 fail0:
@@ -1713,6 +1820,8 @@ static void ol_txrx_pdev_detach(void *ppdev, int force)
 
 	ol_txrx_peer_find_detach(pdev);
 
+	ol_txrx_tso_stats_deinit(pdev);
+
 	qdf_spinlock_destroy(&pdev->tx_mutex);
 	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
 	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
@@ -3698,83 +3807,6 @@ void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
 }
 #endif /* TXRX_DEBUG_LEVEL */
 
-#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
-static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
-{
-	int msdu_idx;
-	int seg_idx;
-
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-		"TSO Statistics:");
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-		"TSO pkts %lld, bytes %lld\n",
-		pdev->stats.pub.tx.tso.tso_pkts.pkts,
-		pdev->stats.pub.tx.tso.tso_pkts.bytes);
-
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"TSO Histogram for numbers of segments:\n"
-			"Single segment	%d\n"
-			"  2-5 segments	%d\n"
-			" 6-10 segments	%d\n"
-			"11-15 segments	%d\n"
-			"16-20 segments	%d\n"
-			"  20+ segments	%d\n",
-			pdev->stats.pub.tx.tso.tso_hist.pkts_1,
-			pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
-			pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
-			pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
-			pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
-			pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);
-
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"TSO History Buffer: Total size %d, current_index %d",
-			NUM_MAX_TSO_MSDUS,
-			TXRX_STATS_TSO_MSDU_IDX(pdev));
-
-	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
-			msdu_idx,
-			TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
-			TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
-			TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
-			TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));
-
-		for (seg_idx = 0;
-			 ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx)) &&
-			  (seg_idx < NUM_MAX_TSO_SEGS));
-			 seg_idx++) {
-			struct qdf_tso_seg_t tso_seg =
-				 TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
-
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-				 "seg idx: %d", seg_idx);
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-				 "tso_enable: %d",
-				 tso_seg.tso_flags.tso_enable);
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-				 "fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
-				 tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
-				 tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
-				 tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
-				 tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
-				 tso_seg.tso_flags.ns);
-			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-				 "tcp_seq_num: 0x%x ip_id: %d",
-				 tso_seg.tso_flags.tcp_seq_num,
-				 tso_seg.tso_flags.ip_id);
-		}
-	 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, "\n");
-	}
-}
-#else
-static void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
-{
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-	 "TSO is not supported\n");
-}
-#endif
-
 /**
  * ol_txrx_stats() - update ol layer stats
  * @vdev_id: vdev_id

+ 39 - 46
core/dp/txrx/ol_txrx_internal.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -711,9 +711,8 @@ NOT_IP_TCP:
 		}                                                             \
 	} while (0)
 
-#define TXRX_STATS_TSO_RESET_MSDU(pdev) \
+#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) \
 	do { \
-		int idx = TXRX_STATS_TSO_MSDU_IDX(pdev);\
 		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg = 0; \
 		pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].tso_seg_idx = 0; \
 	} while (0)
@@ -736,75 +735,69 @@ NOT_IP_TCP:
 #define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) \
 	pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].nr_frags
 
-#define TXRX_STATS_TSO_CURR_MSDU(pdev) \
-	TXRX_STATS_TSO_MSDU(pdev, TXRX_STATS_TSO_MSDU_IDX(pdev))
+#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) \
+	TXRX_STATS_TSO_MSDU(pdev, idx)
 
-#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) \
-	do { \
-		TXRX_STATS_TSO_MSDU_IDX(pdev)++; \
-		TXRX_STATS_TSO_MSDU_IDX(pdev) &= NUM_MAX_TSO_MSDUS_MASK; \
-	} while (0)
-
-#define TXRX_STATS_TSO_SEG_IDX(pdev) \
-	TXRX_STATS_TSO_CURR_MSDU(pdev).tso_seg_idx
+#define TXRX_STATS_TSO_SEG_IDX(pdev, idx) \
+	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx
 
-#define TXRX_STATS_TSO_INC_SEG(pdev) \
+#define TXRX_STATS_TSO_INC_SEG(pdev, idx) \
 	do { \
-		TXRX_STATS_TSO_CURR_MSDU(pdev).num_seg++; \
-		TXRX_STATS_TSO_CURR_MSDU(pdev).num_seg &= \
+		TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg++; \
+		TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg &= \
 					 NUM_MAX_TSO_SEGS_MASK; \
 	} while (0)
 
-#define TXRX_STATS_TSO_RST_SEG(pdev) \
-	TXRX_STATS_TSO_CURR_MSDU(pdev).num_seg = 0
+#define TXRX_STATS_TSO_RST_SEG(pdev, idx) \
+	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).num_seg = 0
 
-#define TXRX_STATS_TSO_RST_SEG_IDX(pdev) \
-	TXRX_STATS_TSO_CURR_MSDU(pdev).tso_seg_idx = 0
+#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) \
+	TXRX_STATS_TSO_CURR_MSDU(pdev, idx).tso_seg_idx = 0
 
 #define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) \
 	TXRX_STATS_TSO_MSDU(pdev, msdu_idx).tso_segs[seg_idx]
 
-#define TXRX_STATS_TSO_CURR_SEG(pdev) \
-	TXRX_STATS_TSO_SEG(pdev, TXRX_STATS_TSO_MSDU_IDX(pdev), \
-	 TXRX_STATS_TSO_SEG_IDX(pdev)) \
+#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) \
+	TXRX_STATS_TSO_SEG(pdev, idx, \
+	 TXRX_STATS_TSO_SEG_IDX(pdev, idx)) \
 
-#define TXRX_STATS_TSO_INC_SEG_IDX(pdev) \
+#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) \
 	do { \
-		TXRX_STATS_TSO_SEG_IDX(pdev)++; \
-		TXRX_STATS_TSO_SEG_IDX(pdev) &= NUM_MAX_TSO_SEGS_MASK; \
+		TXRX_STATS_TSO_SEG_IDX(pdev, idx)++; \
+		TXRX_STATS_TSO_SEG_IDX(pdev, idx) &= NUM_MAX_TSO_SEGS_MASK; \
 	} while (0)
 
-#define TXRX_STATS_TSO_SEG_UPDATE(pdev, tso_seg) \
-	(TXRX_STATS_TSO_CURR_SEG(pdev) = tso_seg)
+#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) \
+	(TXRX_STATS_TSO_CURR_SEG(pdev, idx) = tso_seg)
 
-#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, size) \
-	(TXRX_STATS_TSO_CURR_MSDU(pdev).gso_size = size)
+#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) \
+	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).gso_size = size)
 
-#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, len) \
-	(TXRX_STATS_TSO_CURR_MSDU(pdev).total_len = len)
+#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) \
+	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).total_len = len)
 
-#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, frags) \
-	(TXRX_STATS_TSO_CURR_MSDU(pdev).nr_frags = frags)
+#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) \
+	(TXRX_STATS_TSO_CURR_MSDU(pdev, idx).nr_frags = frags)
 
 #else
 #define TXRX_STATS_TSO_HISTOGRAM(_pdev, _p_cntrs)  /* no-op */
-#define TXRX_STATS_TSO_RESET_MSDU(pdev) /* no-op */
+#define TXRX_STATS_TSO_RESET_MSDU(pdev, idx) /* no-op */
 #define TXRX_STATS_TSO_MSDU_IDX(pdev) /* no-op */
 #define TXRX_STATS_TSO_MSDU(pdev, idx) /* no-op */
 #define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) /* no-op */
-#define TXRX_STATS_TSO_CURR_MSDU(pdev) /* no-op */
+#define TXRX_STATS_TSO_CURR_MSDU(pdev, idx) /* no-op */
 #define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) /* no-op */
-#define TXRX_STATS_TSO_SEG_IDX(pdev) /* no-op */
+#define TXRX_STATS_TSO_SEG_IDX(pdev, idx) /* no-op */
 #define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) /* no-op */
-#define TXRX_STATS_TSO_CURR_SEG(pdev) /* no-op */
-#define TXRX_STATS_TSO_INC_SEG_IDX(pdev) /* no-op */
-#define TXRX_STATS_TSO_SEG_UPDATE(pdev, tso_seg) /* no-op */
-#define TXRX_STATS_TSO_INC_SEG(pdev) /* no-op */
-#define TXRX_STATS_TSO_RST_SEG(pdev) /* no-op */
-#define TXRX_STATS_TSO_RST_SEG_IDX(pdev) /* no-op */
-#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, size) /* no-op */
-#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, len) /* no-op */
-#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, frags) /* no-op */
+#define TXRX_STATS_TSO_CURR_SEG(pdev, idx) /* no-op */
+#define TXRX_STATS_TSO_INC_SEG_IDX(pdev, idx) /* no-op */
+#define TXRX_STATS_TSO_SEG_UPDATE(pdev, idx, tso_seg) /* no-op */
+#define TXRX_STATS_TSO_INC_SEG(pdev, idx) /* no-op */
+#define TXRX_STATS_TSO_RST_SEG(pdev, idx) /* no-op */
+#define TXRX_STATS_TSO_RST_SEG_IDX(pdev, idx) /* no-op */
+#define TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, idx, size) /* no-op */
+#define TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev, idx, len) /* no-op */
+#define TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, idx, frags) /* no-op */
 #define TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, idx) /* no-op */
 #define TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, idx) /* no-op */
 #define TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, idx) /* no-op */