Browse Source

qcacmn: NAWDS multicast issue CR2130039

Multicast frames in NAWDS mode were causing a flood of packets between the
NAWDS-enabled APs, due to each AP continuously re-sending multicast frames
received from the other AP and vice versa. Resolved the issue by dropping
3-address multicast frames, and 4-address multicast frames whose sa_peer is
the same as the tx_peer.

Change-Id: I5edbcb060720510eb6ea5f5ffd46402535ba6748
CRs-Fixed: 2130039
Ruchi, Agrawal 7 years ago
parent
commit
bd894b3bb9
5 changed files with 156 additions and 38 deletions
  1. 8 0
      dp/inc/cdp_txrx_cmn_struct.h
  2. 18 0
      dp/wifi3.0/dp_main.c
  3. 13 2
      dp/wifi3.0/dp_rx.c
  4. 14 0
      dp/wifi3.0/dp_rx_err.c
  5. 103 36
      dp/wifi3.0/dp_tx.c

+ 8 - 0
dp/inc/cdp_txrx_cmn_struct.h

@@ -744,6 +744,10 @@ struct cdp_tx_stats {
 	struct cdp_pkt_info ucast;
 	/* Multicast Packet Count */
 	struct cdp_pkt_info mcast;
+	/* NAWDS Multicast Packet Count */
+	struct cdp_pkt_info nawds_mcast;
+	/* NAWDS Multicast Drop Count */
+	uint32_t nawds_mcast_drop;
 	/* Successful Tx Packets */
 	struct cdp_pkt_info tx_success;
 	/* Total Tx failure */
@@ -809,6 +813,8 @@ struct cdp_rx_stats {
 	struct cdp_pkt_info wds;
 	/* Raw Pakets received */
 	struct cdp_pkt_info raw;
+	/* NAWDS Multicast Drop Count (packets and bytes) */
+	struct cdp_pkt_info nawds_mcast_drop;
 
 	struct {
 	/* Intra BSS packets received */
@@ -863,6 +869,8 @@ struct cdp_tx_ingress_stats {
 	struct cdp_pkt_info reinject_pkts;
 	/*  Total packets passed to inspect handler */
 	struct cdp_pkt_info inspect_pkts;
+	/* NAWDS Multicast Packet Count */
+	struct cdp_pkt_info nawds_mcast;
 
 	struct {
 		/* Total Raw packets */

+ 18 - 0
dp/wifi3.0/dp_main.c

@@ -3765,6 +3765,7 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
 			DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.pkts);
 			DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.fail);
 			DP_STATS_AGGR_PKT(pdev, vdev, rx.raw);
+			DP_STATS_AGGR_PKT(pdev, vdev, tx_i.nawds_mcast);
 
 			DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
 			DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
@@ -3900,6 +3901,11 @@ dp_print_pdev_tx_stats(struct dp_pdev *pdev)
 			pdev->stats.tx_i.inspect_pkts.num);
 	DP_PRINT_STATS("	Bytes = %d",
 			pdev->stats.tx_i.inspect_pkts.bytes);
+	DP_PRINT_STATS("Nawds Multicast:");
+	DP_PRINT_STATS("	Packets = %d",
+			pdev->stats.tx_i.nawds_mcast.num);
+	DP_PRINT_STATS("	Bytes = %d",
+			pdev->stats.tx_i.nawds_mcast.bytes);
 }
 
 /**
@@ -4330,6 +4336,13 @@ static inline void dp_print_peer_stats(struct dp_peer *peer)
 			peer->stats.tx.dropped.fw_rem_notx);
 	DP_PRINT_STATS("Dropped : Age Out = %d",
 			peer->stats.tx.dropped.age_out);
+	DP_PRINT_STATS("NAWDS : ");
+	DP_PRINT_STATS("	Nawds multicast Drop Tx Packet = %d",
+			peer->stats.tx.nawds_mcast_drop);
+	DP_PRINT_STATS("	Nawds multicast  Tx Packet Count = %d",
+			peer->stats.tx.nawds_mcast.num);
+	DP_PRINT_STATS("	Nawds multicast  Tx Packet Bytes = %d",
+			peer->stats.tx.nawds_mcast.bytes);
 
 	DP_PRINT_STATS("Rate Info:");
 
@@ -4406,6 +4419,11 @@ static inline void dp_print_peer_stats(struct dp_peer *peer)
 			peer->stats.rx.non_amsdu_cnt);
 	DP_PRINT_STATS("MSDUs Received As Part of Amsdu = %d",
 			peer->stats.rx.amsdu_cnt);
+	DP_PRINT_STATS("NAWDS : ");
+	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet = %d",
+			peer->stats.rx.nawds_mcast_drop.num);
+	DP_PRINT_STATS("	Nawds multicast Drop Rx Packet Bytes = %d",
+			peer->stats.rx.nawds_mcast_drop.bytes);
 	DP_PRINT_STATS("SGI ="
 			" 0.8us %d"
 			" 0.4us %d"

+ 13 - 2
dp/wifi3.0/dp_rx.c

@@ -376,6 +376,7 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
 	 * and also check if the source peer and destination peer
 	 * belong to the same vap and destination peer is not bss peer.
 	 */
+
 	if ((hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr) &&
 	   !hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
 		da_idx = hal_rx_msdu_end_da_idx_get(rx_tlv_hdr);
@@ -1190,6 +1191,17 @@ done:
 		}
 
 		pdev = vdev->pdev;
+
+		if (qdf_unlikely((peer->nawds_enabled == true) &&
+			(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
+			(hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
+			DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop, 1,
+				qdf_nbuf_len(nbuf));
+			qdf_nbuf_free(nbuf);
+			nbuf = next;
+			continue;
+		}
+
 		if (qdf_likely(
 			!hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr)
 			&&
@@ -1343,8 +1355,7 @@ done:
 						nbuf);
 
 			/* Intrabss-fwd */
-			if (dp_rx_check_ap_bridge(vdev) &&
-				!vdev->nawds_enabled)
+			if (dp_rx_check_ap_bridge(vdev))
 				if (dp_rx_intrabss_fwd(soc,
 							peer,
 							rx_tlv_hdr,

+ 14 - 0
dp/wifi3.0/dp_rx_err.c

@@ -475,6 +475,20 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
 	qdf_spin_unlock_bh(&soc->ast_lock);
 
 skip_mec_check:
+
+	if (qdf_unlikely((peer->nawds_enabled == true) &&
+			hal_rx_msdu_end_da_is_mcbc_get(rx_desc->rx_buf_start)
+			)) {
+		QDF_TRACE(QDF_MODULE_ID_DP,
+					QDF_TRACE_LEVEL_DEBUG,
+					"%s free buffer for multicast packet",
+					 __func__);
+		DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop,
+					1, qdf_nbuf_len(nbuf));
+		qdf_nbuf_free(nbuf);
+		goto fail;
+	}
+
 	/* WDS Source Port Learning */
 	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))
 		dp_rx_wds_srcport_learn(soc, rx_desc->rx_buf_start, peer, nbuf);

+ 103 - 36
dp/wifi3.0/dp_tx.c

@@ -52,6 +52,9 @@
 /* disable TQM_BYPASS */
 #define TQM_BYPASS_WAR 0
 
+/* invalid peer id for reinject*/
+#define DP_INVALID_PEER 0XFFFE
+
 /**
  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
  * @vdev: DP Virtual device handle
@@ -1060,7 +1063,10 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 		goto fail_return;
 	}
 
-	if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
+	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
+		htt_tcl_metadata = vdev->htt_tcl_metadata;
+		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
+	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
 		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
 				HTT_TCL_METADATA_TYPE_PEER_BASED);
 		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
@@ -1395,6 +1401,7 @@ qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 
 #endif
 
+#ifdef DP_FEATURE_NAWDS_TX
 /**
  * dp_tx_prepare_nawds(): Tramit NAWDS frames
  * @vdev: dp_vdev handle
@@ -1407,14 +1414,36 @@ qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  * return: NULL on success nbuf on failure
  */
 static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
-		uint8_t tid, struct dp_tx_queue *tx_q, uint32_t *meta_data,
-		uint32_t peer_id)
+		uint8_t tid, struct dp_tx_queue *tx_q, uint32_t *meta_data)
 {
 	struct dp_peer *peer = NULL;
+	struct dp_soc *soc = vdev->pdev->soc;
+	struct dp_ast_entry *ast_entry = NULL;
+	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
+	uint16_t peer_id = HTT_INVALID_PEER;
+
+	struct dp_peer *sa_peer = NULL;
 	qdf_nbuf_t nbuf_copy;
+
+	qdf_spin_lock_bh(&(soc->ast_lock));
+	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost), 0);
+	if (ast_entry)
+		sa_peer = ast_entry->peer;
+
+	qdf_spin_unlock_bh(&(soc->ast_lock));
+
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
-				(peer->nawds_enabled || peer->bss_peer)) {
+				(peer->nawds_enabled)) {
+			if (sa_peer == peer) {
+				QDF_TRACE(QDF_MODULE_ID_DP,
+						QDF_TRACE_LEVEL_DEBUG,
+						" %s: broadcast multicast packet",
+						 __func__);
+				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
+				continue;
+			}
+
 			nbuf_copy = qdf_nbuf_copy(nbuf);
 			if (!nbuf_copy) {
 				QDF_TRACE(QDF_MODULE_ID_DP,
@@ -1426,17 +1455,19 @@ static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, tid,
 					tx_q, meta_data, peer_id);
 			if (nbuf_copy != NULL) {
-				qdf_nbuf_free(nbuf);
-				return nbuf_copy;
+				qdf_nbuf_free(nbuf_copy);
+				continue;
 			}
+			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
+						1, qdf_nbuf_len(nbuf));
 		}
 	}
 	if (peer_id == HTT_INVALID_PEER)
 		return nbuf;
 
-	qdf_nbuf_free(nbuf);
 	return NULL;
 }
+#endif
 
 /**
  * dp_tx_send() - Transmit a frame on a given VAP
@@ -1578,16 +1609,6 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
 
 	}
 
-	if (vdev->nawds_enabled) {
-		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
-		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
-			nbuf = dp_tx_prepare_nawds(vdev, nbuf, msdu_info.tid,
-					&msdu_info.tx_queue,
-					msdu_info.meta_data, peer_id);
-			return nbuf;
-		}
-	}
-
 	/*  Single linear frame */
 	/*
 	 * If nbuf is a simple linear frame, use send_single function to
@@ -1624,8 +1645,13 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
 	qdf_nbuf_t nbuf = tx_desc->nbuf;
 	qdf_nbuf_t nbuf_copy = NULL;
 	struct dp_tx_msdu_info_s msdu_info;
+	struct dp_peer *sa_peer = NULL;
+	struct dp_ast_entry *ast_entry = NULL;
+	struct dp_soc *soc = NULL;
+	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
 
 	vdev = tx_desc->vdev;
+	soc = vdev->pdev->soc;
 
 	qdf_assert(vdev);
 
@@ -1639,21 +1665,41 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
 	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
 			qdf_nbuf_len(tx_desc->nbuf));
 
-	if (!vdev->osif_proxy_arp) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
-				"function pointer to proxy arp not present\n");
-		return;
-	}
+
+	qdf_spin_lock_bh(&(soc->ast_lock));
+
+	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost), 0);
+	if (ast_entry)
+		sa_peer = ast_entry->peer;
+
+	qdf_spin_unlock_bh(&(soc->ast_lock));
 
 	if (qdf_unlikely(vdev->mesh_vdev)) {
 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
 	} else {
 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
-					(peer->bss_peer || peer->nawds_enabled)
-					&& !(vdev->osif_proxy_arp(
-							vdev->osif_vdev,
-							nbuf))) {
+					((peer->bss_peer &&
+					!(vdev->osif_proxy_arp(
+						vdev->osif_vdev,
+						nbuf))) ||
+					peer->nawds_enabled)) {
+				peer_id = DP_INVALID_PEER;
+
+				if (peer->nawds_enabled) {
+					peer_id = peer->peer_ids[0];
+					if (sa_peer == peer) {
+						QDF_TRACE(
+							QDF_MODULE_ID_DP,
+							QDF_TRACE_LEVEL_DEBUG,
+							" %s: multicast packet",
+							__func__);
+						DP_STATS_INC(peer,
+							tx.nawds_mcast_drop, 1);
+						continue;
+					}
+				}
+
 				nbuf_copy = qdf_nbuf_copy(nbuf);
 
 				if (!nbuf_copy) {
@@ -1663,27 +1709,48 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
 					break;
 				}
 
-				if (peer->nawds_enabled)
-					peer_id = peer->peer_ids[0];
-				else
-					peer_id = HTT_INVALID_PEER;
-
 				nbuf_copy = dp_tx_send_msdu_single(vdev,
-						nbuf_copy, msdu_info.tid,
+						nbuf_copy,
+						msdu_info.tid,
 						&msdu_info.tx_queue,
-						msdu_info.meta_data, peer_id);
+						msdu_info.meta_data,
+						peer_id);
 
 				if (nbuf_copy) {
 					QDF_TRACE(QDF_MODULE_ID_DP,
-							QDF_TRACE_LEVEL_DEBUG,
-							FL("pkt send failed"));
+						QDF_TRACE_LEVEL_DEBUG,
+						FL("pkt send failed"));
 					qdf_nbuf_free(nbuf_copy);
+				} else {
+					if (peer_id != DP_INVALID_PEER)
+						DP_STATS_INC_PKT(peer,
+							tx.nawds_mcast,
+							1, qdf_nbuf_len(nbuf));
 				}
 			}
 		}
 	}
 
-	qdf_nbuf_free(nbuf);
+	if (vdev->nawds_enabled) {
+		peer_id = DP_INVALID_PEER;
+
+		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
+					1, qdf_nbuf_len(nbuf));
+
+		nbuf = dp_tx_send_msdu_single(vdev,
+				nbuf, msdu_info.tid,
+				&msdu_info.tx_queue,
+				msdu_info.meta_data, peer_id);
+
+		if (nbuf) {
+			QDF_TRACE(QDF_MODULE_ID_DP,
+				QDF_TRACE_LEVEL_DEBUG,
+				FL("pkt send failed"));
+			qdf_nbuf_free(nbuf);
+		}
+	} else
+		qdf_nbuf_free(nbuf);
+
 	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
 }