
qcacmn: Extend tid_lock in rx defrag store fragment path

Currently, we are flushing fragments for a peer as part of rx defrag
cleanup. There is a possibility that a frag flush is called while
we are accessing frags in the rx defrag store fragment path in
parallel, which will cause a use-after-free issue.

Extend the tid_lock in the rx defrag store fragment path to avoid any
parallel flush while the frags are being accessed.

Change-Id: I1e897b195f61d80ea6738e9a93f7bcaaa04adc97
CRs-Fixed: 3065414
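
To make the race concrete, below is a minimal, self-contained sketch of the pattern this change enforces, using pthread mutexes in place of qdf_spin_lock_bh(); the names tid_state, frag_flush, and frag_store are hypothetical stand-ins for illustration, not the driver's actual symbols.

/*
 * Hypothetical sketch of the use-after-free race being fixed.
 * pthread mutexes stand in for the qdf spinlock; all names here
 * are illustrative, not the real dp_rx_defrag.c symbols.
 */
#include <pthread.h>
#include <stdlib.h>

struct tid_state {
	pthread_mutex_t tid_lock;
	int *array;             /* stands in for rx_tid[tid].array */
};

/* Cleanup path: frees the fragment array under the lock. */
static void frag_flush(struct tid_state *t)
{
	pthread_mutex_lock(&t->tid_lock);
	free(t->array);
	t->array = NULL;
	pthread_mutex_unlock(&t->tid_lock);
}

/*
 * Store path: the lock must cover both the lookup and every use of
 * the array. Checking the pointer and then using it outside the
 * lock would let frag_flush() free it in between, which is the
 * access-after-free this commit closes.
 */
static int frag_store(struct tid_state *t, int frag)
{
	int ret = -1;

	pthread_mutex_lock(&t->tid_lock);
	if (t->array) {         /* lookup and use in one critical section */
		t->array[0] = frag;
		ret = 0;
	}
	pthread_mutex_unlock(&t->tid_lock);
	return ret;
}

int main(void)
{
	struct tid_state t = {
		.tid_lock = PTHREAD_MUTEX_INITIALIZER,
		.array = malloc(sizeof(int)),
	};

	frag_store(&t, 1);        /* succeeds: array is live */
	frag_flush(&t);           /* cleanup frees the array */
	return frag_store(&t, 2); /* safely refused: array is NULL */
}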
Amit Mehta 3 years ago
parent
commit
08f31818b9
1 changed file with 15 additions and 6 deletions:
      dp/wifi3.0/dp_rx_defrag.c

+ 15 - 6
dp/wifi3.0/dp_rx_defrag.c

@@ -1734,9 +1734,6 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 		goto discard_frag;
 	}
 
-	pdev = peer->vdev->pdev;
-	rx_tid = &peer->rx_tid[tid];
-
 	mpdu_sequence_control_valid =
 		hal_rx_get_mpdu_sequence_control_valid(soc->hal_soc,
 						       rx_desc->rx_buf_start);
@@ -1771,10 +1768,15 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 	 */
 	fragno = dp_rx_frag_get_mpdu_frag_number(soc, rx_desc->rx_buf_start);
 
+	pdev = peer->vdev->pdev;
+	rx_tid = &peer->rx_tid[tid];
+
+	qdf_spin_lock_bh(&rx_tid->tid_lock);
 	rx_reorder_array_elem = peer->rx_tid[tid].array;
 	if (!rx_reorder_array_elem) {
 		dp_err_rl("Rcvd Fragmented pkt before tid setup for peer %pK",
 			  peer);
+		qdf_spin_unlock_bh(&rx_tid->tid_lock);
 		goto discard_frag;
 	}
 
@@ -1792,6 +1794,7 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			"Rcvd unfragmented pkt on REO Err srng, dropping");
 
+		qdf_spin_unlock_bh(&rx_tid->tid_lock);
 		qdf_assert(0);
 		goto discard_frag;
 	}
@@ -1819,6 +1822,13 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 			rx_tid->curr_seq_num = rxseq;
 		}
 	} else {
+		/* Check if we are processing first fragment if it is
+		 * not first fragment discard fragment.
+		 */
+		if (fragno) {
+			qdf_spin_unlock_bh(&rx_tid->tid_lock);
+			goto discard_frag;
+		}
 		dp_debug("cur rxseq %d\n", rxseq);
 		/* Start of a new sequence */
 		dp_rx_defrag_cleanup(peer, tid);
@@ -1830,11 +1840,9 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 	 * If the earlier sequence was dropped, this will be the fresh start.
 	 * Else, continue with next fragment in a given sequence
 	 */
-	qdf_spin_lock_bh(&rx_tid->tid_lock);
 	status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
 			&rx_reorder_array_elem->tail, frag,
 			&all_frag_present);
-	qdf_spin_unlock_bh(&rx_tid->tid_lock);
 
 	/*
 	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
@@ -1852,6 +1860,7 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 		if (status != QDF_STATUS_SUCCESS) {
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 				"%s: Unable to store ring desc !", __func__);
+			qdf_spin_unlock_bh(&rx_tid->tid_lock);
 			goto discard_frag;
 		}
 	} else {
@@ -1880,6 +1889,7 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 
 		dp_rx_defrag_waitlist_add(peer, tid);
 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
+		qdf_spin_unlock_bh(&rx_tid->tid_lock);
 
 		return QDF_STATUS_SUCCESS;
 	}
@@ -1888,7 +1898,6 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 		  "All fragments received for sequence: %d", rxseq);
 
 	/* Process the fragments */
-	qdf_spin_lock_bh(&rx_tid->tid_lock);
 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
 		rx_reorder_array_elem->tail);
 	if (QDF_IS_STATUS_ERROR(status)) {