Browse Source

qcacld-3.0: Fix nbuf user count logic for TSO packets TX

The current mechanism of maintaining the nbuf user count to handle
TSO segments during transmission is not correct and causes a
double free in corner cases.

Fix this logic by incrementing the users count before the HW send
and reverting the users count if the HW send fails.

Change-Id: I47bd5d3b214705dc7ba12f5e482ee80447831bfe
CRs-Fixed: 3516215
Karthik Kantamneni 1 year ago
parent
commit
72f846f20d
1 changed file with 16 additions and 35 deletions
  1. 16 35
      core/dp/txrx/ol_tx_ll_fastpath.c

+ 16 - 35
core/dp/txrx/ol_tx_ll_fastpath.c

@@ -358,14 +358,6 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 		 * pointer before the ce_send call.
 		 */
 		next = qdf_nbuf_next(msdu);
-		/*
-		 * Increment the skb->users count here, for this SKB, to make
-		 * sure it will be freed only after receiving Tx completion
-		 * of the last segment.
-		 * Decrement skb->users count before sending last segment
-		 */
-		if (qdf_nbuf_is_tso(msdu) && segments)
-			qdf_nbuf_inc_users(msdu);
 
 		/* init the current segment to the 1st segment in the list */
 		while (segments) {
@@ -406,17 +398,6 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 			if (qdf_likely(tx_desc)) {
 				struct qdf_tso_seg_elem_t *next_seg;
 
-				/*
-				 * if this is a jumbo nbuf, then increment the
-				 * number of nbuf users for each additional
-				 * segment of the msdu. This will ensure that
-				 * the skb is freed only after receiving tx
-				 * completion for all segments of an nbuf.
-				 */
-				if (segments !=
-					(msdu_info.tso_info.num_segs - 1))
-					qdf_nbuf_inc_users(msdu);
-
 				ol_tx_trace_pkt(msdu, tx_desc->id,
 						vdev->vdev_id,
 						vdev->qdf_opmode);
@@ -433,17 +414,19 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 						sent_to_target = 1;
 					next_seg = msdu_info.tso_info.
 						curr_seg->next;
+					/*
+					 * If this is a jumbo nbuf, then increment the
+					 * number of nbuf users for each additional
+					 * segment of the msdu. This will ensure that
+					 * the skb is freed only after receiving tx
+					 * completion for all segments of an nbuf
+					 */
+					if (next_seg)
+						qdf_nbuf_inc_users(msdu);
 				} else {
 					next_seg = NULL;
 				}
 
-				/* Decrement the skb-users count if segment
-				 * is the last segment or the only segment
-				 */
-				if (tx_desc->pkt_type == OL_TX_FRM_TSO &&
-				    segments == 0)
-					qdf_nbuf_tx_free(msdu, 0);
-
 				if ((ce_send_fast(pdev->ce_tx_hdl, msdu,
 						  ep_id,
 						  pkt_download_len) == 0)) {
@@ -458,9 +441,12 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 						tso_info->curr_seg = next_seg;
 						ol_free_remaining_tso_segs(vdev,
 							&msdu_info, true);
-						if (segments ==
-						    (msdu_info.tso_info.num_segs
-						     - 1))
+						/*
+						 * Revert the nbuf users
+						 * increment done for the
+						 * current segment
+						 */
+						if (next_seg)
 							qdf_nbuf_tx_free(
 							msdu,
 							QDF_NBUF_PKT_ERROR);
@@ -490,14 +476,9 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 				 * If TSO packet, free associated
 				 * remaining TSO segment descriptors
 				 */
-				if (qdf_nbuf_is_tso(msdu)) {
+				if (qdf_nbuf_is_tso(msdu))
 					ol_free_remaining_tso_segs(vdev,
 							&msdu_info, true);
-					if (segments ==
-					    (msdu_info.tso_info.num_segs - 1))
-						qdf_nbuf_tx_free(msdu,
-							 QDF_NBUF_PKT_ERROR);
-				}
 				TXRX_STATS_MSDU_LIST_INCR(
 					pdev, tx.dropped.host_reject, msdu);
 				/* the list of unaccepted MSDUs */