
qcacmn: Fix defrag waitlist flush issues

Check that the peer is valid before accessing it during the flush.

Check the waitlist during peer cleanup even if the REO queue was
not allocated, since the two are independent.

Add a timeout to avoid calling the flush function too frequently.
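For illustration, the timeout change boils down to tracking the earliest time the next flush may run and skipping the call until then. Below is a minimal standalone sketch of that pattern; defrag_state, fake_now_ms, and maybe_flush() are hypothetical stand-ins, not the actual qdf/dp APIs (the real code uses qdf_system_ticks_to_msecs() and soc->rx.defrag.next_flush_ms, as shown in the diffs that follow).

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the defrag bookkeeping in struct dp_soc. */
struct defrag_state {
	uint32_t timeout_ms;     /* minimum gap between flushes */
	uint32_t next_flush_ms;  /* earliest time the next flush may run */
};

/* Simulated clock standing in for qdf_system_ticks_to_msecs(). */
static uint32_t fake_now_ms;

static void waitlist_flush(struct defrag_state *st)
{
	/* The driver walks the waitlist here; the sketch only reschedules. */
	st->next_flush_ms = fake_now_ms + st->timeout_ms;
	printf("flush at %u ms, next allowed at %u ms\n",
	       fake_now_ms, st->next_flush_ms);
}

/* Mirrors the gate added to dp_rx_err_process(): skip if called too soon. */
static void maybe_flush(struct defrag_state *st)
{
	if (fake_now_ms >= st->next_flush_ms)
		waitlist_flush(st);
}

int main(void)
{
	struct defrag_state st = { .timeout_ms = 100, .next_flush_ms = 0 };

	maybe_flush(&st);        /* runs: next_flush_ms starts at 0     */
	fake_now_ms += 10;
	maybe_flush(&st);        /* skipped: only 10 ms have elapsed    */
	fake_now_ms += 100;
	maybe_flush(&st);        /* runs again: past the 100 ms timeout */
	return 0;
}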

Change-Id: Ib8da2014f81a48ccc3ca6a330209a942ac0998a2
Karunakar Dasineni, 6 years ago
commit f8ec0cbe57
5 changed files with 43 additions and 12 deletions
  1. dp/wifi3.0/dp_main.c (+1 -0)
  2. dp/wifi3.0/dp_peer.c (+5 -3)
  3. dp/wifi3.0/dp_rx_defrag.c (+29 -7)
  4. dp/wifi3.0/dp_rx_err.c (+7 -2)
  5. dp/wifi3.0/dp_types.h (+1 -0)

+ 1 - 0
dp/wifi3.0/dp_main.c

@@ -2803,6 +2803,7 @@ static int dp_soc_cmn_setup(struct dp_soc *soc)
 	TAILQ_INIT(&soc->rx.defrag.waitlist);
 	soc->rx.defrag.timeout_ms =
 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
+	soc->rx.defrag.next_flush_ms = 0;
 	soc->rx.flags.defrag_timeout_check =
 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);

+ 5 - 3
dp/wifi3.0/dp_peer.c

@@ -2086,12 +2086,14 @@ void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 
 		qdf_spin_lock_bh(&rx_tid->tid_lock);
-		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
-			dp_rx_tid_delete_wifi3(peer, tid);
-
+		if (!peer->bss_peer) {
 			/* Cleanup defrag related resource */
 			dp_rx_defrag_waitlist_remove(peer, tid);
 			dp_rx_reorder_flush_frag(peer, tid);
+		}
+
+		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
+			dp_rx_tid_delete_wifi3(peer, tid);
 
 			tid_delete_mask |= (1 << tid);
 		}

+ 29 - 7
dp/wifi3.0/dp_rx_defrag.c

@@ -157,7 +157,7 @@ void dp_rx_reorder_flush_frag(struct dp_peer *peer,
  */
 void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
 {
-	struct dp_rx_tid *rx_reorder;
+	struct dp_rx_tid *rx_reorder = NULL;
 	struct dp_rx_tid *tmp;
 	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
 	TAILQ_HEAD(, dp_rx_tid) temp_list;
@@ -189,20 +189,39 @@ void dp_rx_defrag_waitlist_flush(struct dp_soc *soc)
 		TAILQ_INSERT_TAIL(&temp_list, rx_reorder,
 				  defrag_waitlist_elem);
 	}
+	if (rx_reorder) {
+		soc->rx.defrag.next_flush_ms =
+			rx_reorder->defrag_timeout_ms;
+	} else {
+		soc->rx.defrag.next_flush_ms =
+			now_ms + soc->rx.defrag.timeout_ms;
+	}
+
 	qdf_spin_unlock_bh(&soc->rx.defrag.defrag_lock);
 
 	TAILQ_FOREACH_SAFE(rx_reorder, &temp_list,
 			   defrag_waitlist_elem, tmp) {
-		struct dp_peer *peer;
+		struct dp_peer *peer, *temp_peer = NULL;
 
+		qdf_spin_lock_bh(&rx_reorder->tid_lock);
+		TAILQ_REMOVE(&temp_list, rx_reorder,
+			     defrag_waitlist_elem);
 		/* get address of current peer */
 		peer =
 			container_of(rx_reorder, struct dp_peer,
 				     rx_tid[rx_reorder->tid]);
-
-		qdf_spin_lock_bh(&rx_reorder->tid_lock);
-		dp_rx_reorder_flush_frag(peer, rx_reorder->tid);
 		qdf_spin_unlock_bh(&rx_reorder->tid_lock);
+
+		temp_peer = dp_peer_find_by_id(soc, peer->peer_ids[0]);
+		if (temp_peer == peer) {
+			qdf_spin_lock_bh(&rx_reorder->tid_lock);
+			dp_rx_reorder_flush_frag(peer, rx_reorder->tid);
+			qdf_spin_unlock_bh(&rx_reorder->tid_lock);
+		}
+
+		if (temp_peer)
+			dp_peer_unref_del_find_by_id(temp_peer);
+
 	}
 }
 
@@ -226,6 +245,8 @@ static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
 
 	/* TODO: use LIST macros instead of TAIL macros */
 	qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
+	if (TAILQ_EMPTY(&psoc->rx.defrag.waitlist))
+		psoc->rx.defrag.next_flush_ms = rx_reorder->defrag_timeout_ms;
 	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
 				defrag_waitlist_elem);
 	DP_STATS_INC(psoc, rx.rx_frag_wait, 1);
@@ -246,6 +267,7 @@ void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
 	struct dp_pdev *pdev = peer->vdev->pdev;
 	struct dp_soc *soc = pdev->soc;
 	struct dp_rx_tid *rx_reorder;
+	struct dp_rx_tid *tmp;
 
 	if (tid > DP_MAX_TIDS) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
@@ -255,8 +277,8 @@ void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
 	}
 
 	qdf_spin_lock_bh(&soc->rx.defrag.defrag_lock);
-	TAILQ_FOREACH(rx_reorder, &soc->rx.defrag.waitlist,
-			   defrag_waitlist_elem) {
+	TAILQ_FOREACH_SAFE(rx_reorder, &soc->rx.defrag.waitlist,
+			   defrag_waitlist_elem, tmp) {
 		struct dp_peer *peer_on_waitlist;
 
 		/* get address of current peer */

+ 7 - 2
dp/wifi3.0/dp_rx_err.c

@@ -1201,8 +1201,13 @@ dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
 done:
 	hal_srng_access_end(hal_soc, hal_ring);
 
-	if (soc->rx.flags.defrag_timeout_check)
-		dp_rx_defrag_waitlist_flush(soc);
+	if (soc->rx.flags.defrag_timeout_check) {
+		uint32_t now_ms =
+			qdf_system_ticks_to_msecs(qdf_system_ticks());
+
+		if (now_ms >= soc->rx.defrag.next_flush_ms)
+			dp_rx_defrag_waitlist_flush(soc);
+	}
 
 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
 		if (rx_bufs_reaped[mac_id]) {

+ 1 - 0
dp/wifi3.0/dp_types.h

@@ -898,6 +898,7 @@ struct dp_soc {
 		struct {
 			TAILQ_HEAD(, dp_rx_tid) waitlist;
 			uint32_t timeout_ms;
+			uint32_t next_flush_ms;
 			qdf_spinlock_t defrag_lock;
 		} defrag;
 		struct {