
qcacmn: use txrx_peer in rx and tx paths

Use txrx_peer in the rx and tx data paths instead of
the main dp peer.

Change-Id: If628543092be220021240b6f25ee43b009592bac
CRs-Fixed: 3095637
Pavankumar Nandeshwar, 3 years ago
parent commit 98b25a2ee6

+ 1 - 2
dp/inc/cdp_txrx_cmn_struct.h

@@ -944,8 +944,7 @@ typedef QDF_STATUS(*ol_txrx_get_key_fp)(void *osif_dev, uint8_t *key_buf, uint8_
  */
 typedef QDF_STATUS(*ol_txrx_rsim_rx_decap_fp)(void *osif_dev,
 						qdf_nbuf_t *list_head,
-						qdf_nbuf_t *list_tail,
-						uint8_t *peer_mac);
+						qdf_nbuf_t *list_tail);
 
 /* ol_txrx_rx_fp - external tx free function to read per packet stats and
  *                            free tx buffer externally
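The peer_mac parameter is dropped because the data path now holds only a dp_txrx_peer, which does not carry the MAC address. A minimal sketch of an OSIF handler conforming to the narrowed typedef (the per-nbuf helper is an illustrative assumption, not part of this change):

static QDF_STATUS osif_rsim_rx_decap(void *osif_dev,
				     qdf_nbuf_t *list_head,
				     qdf_nbuf_t *list_tail)
{
	qdf_nbuf_t nbuf;

	/* Any per-peer context must now come from the nbuf metadata,
	 * not from a caller-supplied peer_mac.
	 */
	for (nbuf = *list_head; nbuf; nbuf = qdf_nbuf_next(nbuf))
		osif_raw_decap_one(osif_dev, nbuf); /* hypothetical helper */

	return QDF_STATUS_SUCCESS;
}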

+ 83 - 62
dp/wifi3.0/be/dp_be_rx.c

@@ -44,7 +44,7 @@ static void
 dp_rx_wds_learn(struct dp_soc *soc,
 		struct dp_vdev *vdev,
 		uint8_t *rx_tlv_hdr,
-		struct dp_peer *peer,
+		struct dp_txrx_peer *txrx_peer,
 		qdf_nbuf_t nbuf,
 		struct hal_rx_msdu_metadata msdu_metadata)
 {
@@ -52,7 +52,7 @@ dp_rx_wds_learn(struct dp_soc *soc,
 	if (qdf_likely(vdev->wds_enabled))
 		dp_rx_wds_srcport_learn(soc,
 				rx_tlv_hdr,
-				peer,
+				txrx_peer,
 				nbuf,
 				msdu_metadata);
 }
@@ -62,40 +62,51 @@ dp_rx_wds_learn(struct dp_soc *soc,
  * dp_wds_ext_peer_learn_be() - function to send event to control
  * path on receiving 1st 4-address frame from backhaul.
  * @soc: DP soc
- * @ta_peer: WDS repeater peer
+ * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
  *
  * Return: void
  */
 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
-					    struct dp_peer *ta_peer,
+					    struct dp_txrx_peer *ta_txrx_peer,
 					    uint8_t *rx_tlv_hdr)
 {
 	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
+	struct dp_peer *ta_base_peer;
 
 	/* instead of checking addr4 is valid or not in per packet path
 	 * check for init bit, which will be set on reception of
 	 * first addr4 valid packet.
 	 */
-	if (!ta_peer->vdev->wds_ext_enabled ||
-	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &ta_peer->wds_ext.init))
+	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
+	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
+				&ta_txrx_peer->wds_ext.init))
 		return;
 
 	if (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, rx_tlv_hdr)) {
 		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
-					    &ta_peer->wds_ext.init);
-		qdf_mem_copy(wds_ext_src_mac, &ta_peer->mac_addr.raw[0],
+					    &ta_txrx_peer->wds_ext.init);
+
+		ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
+						     DP_MOD_ID_RX);
+
+		if (!ta_base_peer)
+			return;
+
+		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
 			     QDF_MAC_ADDR_SIZE);
+		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);
+
 		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
 						soc->ctrl_psoc,
-						ta_peer->peer_id,
-						ta_peer->vdev->vdev_id,
+						ta_txrx_peer->peer_id,
+						ta_txrx_peer->vdev->vdev_id,
 						wds_ext_src_mac);
 	}
 }
 #else
 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
-					    struct dp_peer *ta_peer,
+					    struct dp_txrx_peer *ta_txrx_peer,
 					    uint8_t *rx_tlv_hdr)
 {
 }
@@ -104,11 +115,11 @@ static void
 dp_rx_wds_learn(struct dp_soc *soc,
 		struct dp_vdev *vdev,
 		uint8_t *rx_tlv_hdr,
-		struct dp_peer *ta_peer,
+		struct dp_txrx_peer *ta_txrx_peer,
 		qdf_nbuf_t nbuf,
 		struct hal_rx_msdu_metadata msdu_metadata)
 {
-	dp_wds_ext_peer_learn_be(soc, ta_peer, rx_tlv_hdr);
+	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr);
 }
 #endif
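The copy-then-unref sequence above is the recurring idiom of this change: fields that stayed on the base dp_peer (here the MAC address) are read under a short-lived reference taken via the txrx peer's peer_id, so the fast path never keeps a base-peer pointer around. Reduced to its skeleton (a sketch, with error handling limited to the NULL check):

	struct dp_peer *base_peer;
	uint8_t mac[QDF_MAC_ADDR_SIZE];

	base_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
					  DP_MOD_ID_RX);
	if (!base_peer)
		return;

	qdf_mem_copy(mac, &base_peer->mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(base_peer, DP_MOD_ID_RX);
	/* mac remains valid; the reference is already released */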
 
@@ -141,7 +152,8 @@ uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
 	uint16_t msdu_len = 0;
 	uint16_t peer_id;
 	uint8_t vdev_id;
-	struct dp_peer *peer;
+	struct dp_txrx_peer *txrx_peer;
+	dp_txrx_ref_handle txrx_ref_handle;
 	struct dp_vdev *vdev;
 	uint32_t pkt_len = 0;
 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
@@ -196,7 +208,7 @@ more_data:
 	nbuf_tail = NULL;
 	deliver_list_head = NULL;
 	deliver_list_tail = NULL;
-	peer = NULL;
+	txrx_peer = NULL;
 	vdev = NULL;
 	num_rx_bufs_reaped = 0;
 	ebuf_head = NULL;
@@ -468,7 +480,7 @@ done:
 
 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
 	/* Peer can be NULL in case of LFR */
-	if (qdf_likely(peer))
+	if (qdf_likely(txrx_peer))
 		vdev = NULL;
 
 	/*
@@ -491,9 +503,9 @@ done:
 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
 
-		if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
+		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
 					peer_id, vdev_id)) {
-			dp_rx_deliver_to_stack(soc, vdev, peer,
+			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
 					       deliver_list_head,
 					       deliver_list_tail);
 			deliver_list_head = NULL;
@@ -504,16 +516,19 @@ done:
 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
 			tid = qdf_nbuf_get_tid_val(nbuf);
 
-		if (qdf_unlikely(!peer)) {
-			peer = dp_peer_get_ref_by_id(soc, peer_id,
-						     DP_MOD_ID_RX);
-		} else if (peer && peer->peer_id != peer_id) {
-			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
-			peer = dp_peer_get_ref_by_id(soc, peer_id,
-						     DP_MOD_ID_RX);
+		if (qdf_unlikely(!txrx_peer)) {
+			txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
+							       &txrx_ref_handle,
+							       DP_MOD_ID_RX);
+		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
+			dp_txrx_peer_unref_delete(txrx_ref_handle,
+						  DP_MOD_ID_RX);
+			txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
+							       &txrx_ref_handle,
+							       DP_MOD_ID_RX);
 		}
 
-		if (peer) {
+		if (txrx_peer) {
 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
 			qdf_dp_trace_set_track(nbuf, QDF_RX);
 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
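Note the changed contract here: dp_peer_get_ref_by_id() was paired with an unref on the peer pointer itself, whereas dp_txrx_peer_get_ref_by_id() returns the txrx peer plus an opaque dp_txrx_ref_handle through an out-parameter, and it is that handle which dp_txrx_peer_unref_delete() consumes. In isolation (sketch):

	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_txrx_peer *txrx_peer;

	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
					       &txrx_ref_handle,
					       DP_MOD_ID_RX);
	if (txrx_peer) {
		/* ... fast-path work on txrx_peer ... */
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
	}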
@@ -523,8 +538,8 @@ done:
 
 		rx_bufs_used++;
 
-		if (qdf_likely(peer)) {
-			vdev = peer->vdev;
+		if (qdf_likely(txrx_peer)) {
+			vdev = txrx_peer->vdev;
 		} else {
 			nbuf->next = NULL;
 			dp_rx_deliver_to_pkt_capture_no_peer(
@@ -654,7 +669,8 @@ done:
 		 * process frame for multipass phase processing
 		 */
 		if (qdf_unlikely(vdev->multipass_en)) {
-			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
+			if (dp_rx_multipass_process(txrx_peer, nbuf,
+						    tid) == false) {
 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
 				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
@@ -662,7 +678,7 @@ done:
 			}
 		}
 
-		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
+		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
 			dp_rx_err("%pK: Policy Check Drop pkt", soc);
 			DP_STATS_INC(peer, rx.policy_check_drop, 1);
 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
@@ -673,7 +689,7 @@ done:
 			continue;
 		}
 
-		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
+		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
 								rx_tlv_hdr) ==
@@ -688,7 +704,8 @@ done:
 		/*
 		 * Drop non-EAPOL frames from unauthorized peer.
 		 */
-		if (qdf_likely(peer) && qdf_unlikely(!peer->authorize) &&
+		if (qdf_likely(txrx_peer) &&
+		    qdf_unlikely(!txrx_peer->authorize) &&
 		    !qdf_nbuf_is_raw_frame(nbuf)) {
 			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
 					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
@@ -735,13 +752,14 @@ done:
 		    qdf_likely(!vdev->mesh_vdev)) {
 			dp_rx_wds_learn(soc, vdev,
 					rx_tlv_hdr,
-					peer,
+					txrx_peer,
 					nbuf,
 					msdu_metadata);
 
 			/* Intrabss-fwd */
 			if (dp_rx_check_ap_bridge(vdev))
-				if (dp_rx_intrabss_fwd_be(soc, peer, rx_tlv_hdr,
+				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
+							  rx_tlv_hdr,
 							  nbuf,
 							  msdu_metadata)) {
 					nbuf = next;
@@ -758,7 +776,7 @@ done:
 				  nbuf);
 		DP_PEER_TO_STACK_INCC_PKT(peer, 1, QDF_NBUF_CB_RX_PKT_LEN(nbuf),
 					  enh_flag);
-		if (qdf_unlikely(peer->in_twt))
+		if (qdf_unlikely(txrx_peer->in_twt))
 			DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
 					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
 
@@ -767,12 +785,12 @@ done:
 	}
 
 	if (qdf_likely(deliver_list_head)) {
-		if (qdf_likely(peer)) {
+		if (qdf_likely(txrx_peer)) {
 			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
 						     pkt_capture_offload,
 						     deliver_list_head);
 			if (!pkt_capture_offload)
-				dp_rx_deliver_to_stack(soc, vdev, peer,
+				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
 						       deliver_list_head,
 						       deliver_list_tail);
 		} else {
@@ -786,8 +804,8 @@ done:
 		}
 	}
 
-	if (qdf_likely(peer))
-		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
+	if (qdf_likely(txrx_peer))
+		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 
 	/*
 	 * If we are processing in near-full condition, there are 3 scenarios
@@ -1067,7 +1085,7 @@ static inline void dp_rx_dummy_src_mac(qdf_nbuf_t nbuf)
 
 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
 			    struct dp_vdev *vdev,
-			    struct dp_peer *peer,
+			    struct dp_txrx_peer *peer,
 			    qdf_nbuf_t nbuf)
 {
 	struct dp_vdev *mcast_primary_vdev = NULL;
@@ -1147,14 +1165,14 @@ uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
  * Return: true - MLO forwarding case, false: not
  */
 static inline bool
-dp_rx_intrabss_fwd_mlo_allow(struct dp_peer *ta_peer,
-			     struct dp_peer *da_peer)
+dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
+			     struct dp_txrx_peer *da_peer)
 {
 	/* one of TA/DA peer should belong to MLO connection peer,
 	 * only MLD peer type is as expected
 	 */
-	if (!IS_MLO_DP_MLD_PEER(ta_peer) &&
-	    !IS_MLO_DP_MLD_PEER(da_peer))
+	if (!IS_MLO_DP_MLD_TXRX_PEER(ta_peer) &&
+	    !IS_MLO_DP_MLD_TXRX_PEER(da_peer))
 		return false;
 
 	/* TA peer and DA peer's vdev should be partner MLO vdevs */
@@ -1166,8 +1184,8 @@ dp_rx_intrabss_fwd_mlo_allow(struct dp_peer *ta_peer,
 }
 #else
 static inline bool
-dp_rx_intrabss_fwd_mlo_allow(struct dp_peer *ta_peer,
-			     struct dp_peer *da_peer)
+dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
+			     struct dp_txrx_peer *da_peer)
 {
 	return false;
 }
@@ -1188,12 +1206,13 @@ dp_rx_intrabss_fwd_mlo_allow(struct dp_peer *ta_peer,
  */
 static bool
 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
-			      struct dp_peer *ta_peer,
+			      struct dp_txrx_peer *ta_peer,
 			      struct hal_rx_msdu_metadata *msdu_metadata,
 			      struct dp_be_intrabss_params *params)
 {
 	uint16_t da_peer_id;
-	struct dp_peer *da_peer;
+	struct dp_txrx_peer *da_peer;
+	dp_txrx_ref_handle txrx_ref_handle;
 
 	if (!qdf_nbuf_is_intra_bss(nbuf))
 		return false;
@@ -1201,12 +1220,12 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
 	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
 						params->dest_soc,
 						msdu_metadata->da_idx);
-	da_peer = dp_peer_get_ref_by_id(params->dest_soc, da_peer_id,
-					DP_MOD_ID_RX);
+	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
+					     &txrx_ref_handle, DP_MOD_ID_RX);
 	if (!da_peer)
 		return false;
 	params->tx_vdev_id = da_peer->vdev->vdev_id;
-	dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 
 	return true;
 }
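The destination peer reference is dropped inside the check itself; only the plain tx_vdev_id value survives in dp_be_intrabss_params for the forward. A hedged caller-side sketch (field initialization reduced to what the check reads):

	struct dp_be_intrabss_params params = { .dest_soc = soc };

	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_txrx_peer,
					  &msdu_metadata, &params))
		return dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_txrx_peer,
						params.tx_vdev_id, rx_tlv_hdr,
						nbuf, tid_stats);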
@@ -1214,15 +1233,16 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
 #ifdef WLAN_MLO_MULTI_CHIP
 static bool
 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
-			      struct dp_peer *ta_peer,
+			      struct dp_txrx_peer *ta_peer,
 			      struct hal_rx_msdu_metadata *msdu_metadata,
 			      struct dp_be_intrabss_params *params)
 {
 	uint16_t da_peer_id;
-	struct dp_peer *da_peer;
+	struct dp_txrx_peer *da_peer;
 	bool ret = false;
 	uint8_t dest_chip_id;
 	uint8_t soc_idx;
+	dp_txrx_ref_handle txrx_ref_handle;
 	struct dp_vdev_be *be_vdev =
 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
 	struct dp_soc_be *be_soc =
@@ -1245,8 +1265,8 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
 
 	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(params->dest_soc,
 							msdu_metadata->da_idx);
-	da_peer = dp_peer_get_ref_by_id(params->dest_soc, da_peer_id,
-					DP_MOD_ID_RX);
+	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
+					     &txrx_ref_handle, DP_MOD_ID_RX);
 	if (!da_peer)
 		return false;
 	/* soc unref if needed */
@@ -1284,19 +1304,20 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
 	}
 
 rel_da_peer:
-	dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 	return ret;
 }
 #else
 static bool
 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
-			      struct dp_peer *ta_peer,
+			      struct dp_txrx_peer *ta_peer,
 			      struct hal_rx_msdu_metadata *msdu_metadata,
 			      struct dp_be_intrabss_params *params)
 {
 	uint16_t da_peer_id;
-	struct dp_peer *da_peer;
+	struct dp_txrx_peer *da_peer;
 	bool ret = false;
+	dp_txrx_ref_handle txrx_ref_handle;
 
 	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
 		return false;
@@ -1305,8 +1326,8 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
 						params->dest_soc,
 						msdu_metadata->da_idx);
 
-	da_peer = dp_peer_get_ref_by_id(params->dest_soc, da_peer_id,
-					DP_MOD_ID_RX);
+	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
+					     &txrx_ref_handle, DP_MOD_ID_RX);
 	if (!da_peer)
 		return false;
 
@@ -1334,7 +1355,7 @@ dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
 	}
 
 rel_da_peer:
-	dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 	return ret;
 }
 #endif /* WLAN_MLO_MULTI_CHIP */
@@ -1351,7 +1372,7 @@ rel_da_peer:
  *
  * Return: true if it is forwarded else false
  */
-bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_peer *ta_peer,
+bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
 			   struct hal_rx_msdu_metadata msdu_metadata)
 {

+ 2 - 2
dp/wifi3.0/be/dp_be_rx.h

@@ -41,7 +41,7 @@ struct dp_be_intrabss_params {
  * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
  *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
  * @soc: core txrx main context
- * @ta_peer: source peer entry
+ * @ta_txrx_peer: source peer entry
  * @rx_tlv_hdr: start address of rx tlvs
  * @nbuf: nbuf that has to be intrabss forwarded
  * @msdu_metadata: msdu metadata
@@ -50,7 +50,7 @@ struct dp_be_intrabss_params {
  */
 
 bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
-			   struct dp_peer *ta_peer,
+			   struct dp_txrx_peer *ta_txrx_peer,
 			   uint8_t *rx_tlv_hdr,
 			   qdf_nbuf_t nbuf,
 			   struct hal_rx_msdu_metadata msdu_metadata);

+ 11 - 7
dp/wifi3.0/be/dp_be_tx.c

@@ -182,7 +182,8 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
 	struct dp_vdev *vdev = NULL;
 	struct hal_tx_completion_status ts = {0};
 	uint32_t *htt_desc = (uint32_t *)status;
-	struct dp_peer *peer;
+	struct dp_txrx_peer *txrx_peer;
+	dp_txrx_ref_handle txrx_ref_handle;
 	struct cdp_tid_tx_stats *tid_stats = NULL;
 	struct htt_soc *htt_handle;
 	uint8_t vdev_id;
@@ -276,8 +277,9 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
 			tid_stats->htt_status_cnt[tx_status]++;
 
-		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
-					     DP_MOD_ID_HTT_COMP);
+		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
+						       &txrx_ref_handle,
+						       DP_MOD_ID_HTT_COMP);
-		if (qdf_likely(peer))
+		if (qdf_likely(txrx_peer))
 			dp_tx_update_peer_basic_stats(
-						peer,
+						txrx_peer,
@@ -285,12 +287,14 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
 						tx_status,
 						pdev->enhanced_stats_en);
 
-		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
-		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
+		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
+					     ring_id);
+		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
 
-		if (qdf_likely(peer))
-			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
+		if (qdf_likely(txrx_peer))
+			dp_txrx_peer_unref_delete(txrx_ref_handle,
+						  DP_MOD_ID_HTT_COMP);
 
 		break;
 	}

+ 79 - 57
dp/wifi3.0/dp_rx.c

@@ -755,13 +755,13 @@ qdf_export_symbol(__dp_rx_buffers_replenish);
  *
  * @vdev: vdev on which RAW mode is enabled
  * @nbuf_list: list of RAW pkts to process
- * @peer: peer object from which the pkt is rx
+ * @txrx_peer: peer object from which the pkt is rx
  *
  * Return: void
  */
 void
 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
-					struct dp_peer *peer)
+		  struct dp_txrx_peer *txrx_peer)
 {
 	qdf_nbuf_t deliver_list_head = NULL;
 	qdf_nbuf_t deliver_list_tail = NULL;
@@ -790,7 +790,7 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
 	}
 
 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
-				 &deliver_list_tail, peer->mac_addr.raw);
+				 &deliver_list_tail);
 
 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
 }
@@ -798,7 +798,7 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
 #ifndef FEATURE_WDS
 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
-		    struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
+		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
 {
 }
 #endif
@@ -814,7 +814,7 @@ void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
  *
  * Return: bool: true if it is forwarded else false
  */
-bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
 			     struct cdp_tid_rx_stats *tid_stats)
 {
@@ -865,7 +865,7 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
  *
  * Return: bool: true if it is forwarded else false
  */
-bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 			      uint8_t tx_vdev_id,
 			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
 			      struct cdp_tid_rx_stats *tid_stats)
@@ -919,7 +919,7 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
  * @vdev: DP Virtual device handle
  * @nbuf: Buffer pointer
  * @rx_tlv_hdr: start of rx tlv header
- * @peer: pointer to peer
+ * @txrx_peer: pointer to peer
  *
  * This function allocated memory for mesh receive stats and fill the
  * required stats. Stores the memory address in skb cb.
@@ -928,7 +928,8 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
  */
 
 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
-				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
+			   uint8_t *rx_tlv_hdr,
+			   struct dp_txrx_peer *txrx_peer)
 {
 	struct mesh_recv_hdr_s *rx_info = NULL;
 	uint32_t pkt_type;
@@ -938,6 +939,7 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 	uint8_t primary_chan_num;
 	uint32_t center_chan_freq;
 	struct dp_soc *soc = vdev->pdev->soc;
+	struct dp_peer *peer;
 
 	/* fill recv mesh stats */
 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
@@ -958,18 +960,23 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
 
-	if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
-		rx_info->rs_flags |= MESH_RX_DECRYPTED;
-		rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
-							  rx_tlv_hdr);
-		if (vdev->osif_get_key)
-			vdev->osif_get_key(vdev->osif_vdev,
-					&rx_info->rs_decryptkey[0],
-					&peer->mac_addr.raw[0],
-					rx_info->rs_keyix);
+	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
+	if (peer) {
+		if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
+			rx_info->rs_flags |= MESH_RX_DECRYPTED;
+			rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
+								  rx_tlv_hdr);
+			if (vdev->osif_get_key)
+				vdev->osif_get_key(vdev->osif_vdev,
+						   &rx_info->rs_decryptkey[0],
+						   &peer->mac_addr.raw[0],
+						   rx_info->rs_keyix);
+		}
+
+		rx_info->rs_snr = peer->stats.rx.snr;
+		dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
 	}
 
-	rx_info->rs_snr = peer->stats.rx.snr;
 	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
 
 	soc = vdev->pdev->soc;
@@ -1607,14 +1614,14 @@ static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
  * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
  * @soc: core txrx main context
  * @vdev: vdev
- * @peer: peer
+ * @txrx_peer: txrx peer
  * @nbuf_head: skb list head
  *
  * Return: true if packet is delivered to netdev per STA.
  */
 static inline bool
 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
-			   struct dp_peer *peer, qdf_nbuf_t nbuf_head)
+			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
 {
 	/*
 	 * When extended WDS is disabled, frames are sent to AP netdevice.
@@ -1629,11 +1636,11 @@ dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
 	 *    a. Send frame to appropriate netdev using registered fp.
 	 *    b. If fp is NULL, drop the frames.
 	 */
-	if (!peer->wds_ext.init)
+	if (!txrx_peer->wds_ext.init)
 		return false;
 
-	if (peer->osif_rx)
-		peer->osif_rx(peer->wds_ext.osif_peer, nbuf_head);
+	if (txrx_peer->osif_rx)
+		txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
 	else
 		dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
 
@@ -1643,7 +1650,7 @@ dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
 #else
 static inline bool
 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
-			   struct dp_peer *peer, qdf_nbuf_t nbuf_head)
+			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
 {
 	return false;
 }
@@ -1674,8 +1681,8 @@ void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
 		return;
 	}
 
-	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
-		qdf_atomic_dec(&peer->flush_in_progress);
+	if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
+		qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
 		return;
 	}
 
@@ -1686,7 +1693,7 @@ void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
 		drop = true;
 	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
-	bufqi = &peer->bufq_info;
+	bufqi = &peer->txrx_peer->bufq_info;
 
 	qdf_spin_lock_bh(&bufqi->bufq_lock);
 	qdf_list_remove_front(&bufqi->cached_bufq,
@@ -1714,7 +1721,7 @@ void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
 				      (qdf_list_node_t **)&cache_buf);
 	}
 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
-	qdf_atomic_dec(&peer->flush_in_progress);
+	qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
 }
 
 /**
@@ -1725,26 +1732,37 @@ void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  * Return: None
  */
 static QDF_STATUS
-dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
+dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
 {
 	struct dp_rx_cached_buf *cache_buf;
-	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
+	struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
 	int num_buff_elem;
+	QDF_STATUS ret = QDF_STATUS_SUCCESS;
+	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
+	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
+						     DP_MOD_ID_RX);
+
+	if (!peer) {
+		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
+						      rx_buf_list);
+		return QDF_STATUS_E_INVAL;
+	}
 
 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
 		    bufqi->dropped);
 	if (!peer->valid) {
-		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
+		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
 						      rx_buf_list);
-		return QDF_STATUS_E_INVAL;
+		ret = QDF_STATUS_E_INVAL;
+		goto fail;
 	}
 
 	qdf_spin_lock_bh(&bufqi->bufq_lock);
 	if (bufqi->entries >= bufqi->thresh) {
-		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
+		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
 						      rx_buf_list);
 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
-		return QDF_STATUS_E_RESOURCES;
+		ret = QDF_STATUS_E_RESOURCES;
+		goto fail;
 	}
 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
 
@@ -1754,9 +1772,10 @@ dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
 	if (!cache_buf) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "Failed to allocate buf to cache rx frames");
-		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
+		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
 						      rx_buf_list);
-		return QDF_STATUS_E_NOMEM;
+		ret = QDF_STATUS_E_NOMEM;
+		goto fail;
 	}
 
 	cache_buf->buf = rx_buf_list;
@@ -1767,7 +1786,9 @@ dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
 	bufqi->entries += num_buff_elem;
 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
 
-	return QDF_STATUS_SUCCESS;
+fail:
+	dp_peer_unref_delete(peer, DP_MOD_ID_RX);
+	return ret;
 }
 
 static inline
@@ -1783,7 +1804,7 @@ bool dp_rx_is_peer_cache_bufq_supported(void)
 }
 
 static inline QDF_STATUS
-dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
+dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
 {
 	return QDF_STATUS_SUCCESS;
 }
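Because dp_rx_enqueue_rx() now pins the base peer for the peer->valid check, its early returns turn into goto fail paths so the reference is dropped on every exit, success included. The control shape, with the queueing details elided (sketch):

	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_RX);
	if (!peer)
		return QDF_STATUS_E_INVAL;	/* nothing to release yet */

	if (!peer->valid) {
		ret = QDF_STATUS_E_INVAL;
		goto fail;
	}

	/* ... threshold check and enqueue, each failure setting ret ... */

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_RX);
	return ret;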
@@ -1803,11 +1824,11 @@ dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
  */
 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
 					  struct dp_vdev *vdev,
-					  struct dp_peer *peer,
+					  struct dp_txrx_peer *txrx_peer,
 					  qdf_nbuf_t nbuf_head)
 {
 	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
-						    peer, nbuf_head)))
+						    txrx_peer, nbuf_head)))
 		return;
 
 	/* Function pointer initialized only when FISA is enabled */
@@ -1824,7 +1845,7 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  * using the appropriate call back functions.
  * @soc: soc
  * @vdev: vdev
- * @peer: peer
+ * @txrx_peer: txrx peer
  * @nbuf_head: skb list head
  * @nbuf_tail: skb list tail
  *
@@ -1835,7 +1856,7 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  */
 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
 					  struct dp_vdev *vdev,
-					  struct dp_peer *peer,
+					  struct dp_txrx_peer *txrx_peer,
 					  qdf_nbuf_t nbuf_head)
 {
 	int num_nbuf = 0;
@@ -1851,8 +1872,8 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
 	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
 		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
 		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
-		if (peer)
-			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
+		if (txrx_peer)
+			DP_STATS_DEC(txrx_peer, to_stack.num, num_nbuf);
 	}
 }
 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
@@ -1861,7 +1882,7 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  * dp_rx_validate_rx_callbacks() - validate rx callbacks
 * @soc: DP soc
  * @vdev: DP vdev handle
- * @peer: pointer to the peer object
+ * @txrx_peer: pointer to the txrx peer object
 * @nbuf_head: skb list head
  *
  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
@@ -1870,7 +1891,7 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
 static inline QDF_STATUS
 dp_rx_validate_rx_callbacks(struct dp_soc *soc,
 			    struct dp_vdev *vdev,
-			    struct dp_peer *peer,
+			    struct dp_txrx_peer *txrx_peer,
 			    qdf_nbuf_t nbuf_head)
 {
 	int num_nbuf;
@@ -1891,8 +1912,8 @@ dp_rx_validate_rx_callbacks(struct dp_soc *soc,
 	 * callback function. if so let us free the nbuf_list.
 	 */
 	if (qdf_unlikely(!vdev->osif_rx)) {
-		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
-			dp_rx_enqueue_rx(peer, nbuf_head);
+		if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
+			dp_rx_enqueue_rx(txrx_peer, nbuf_head);
 		} else {
 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
 							nbuf_head);
@@ -1907,21 +1928,21 @@ dp_rx_validate_rx_callbacks(struct dp_soc *soc,
 
 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
 				  struct dp_vdev *vdev,
-				  struct dp_peer *peer,
+				  struct dp_txrx_peer *txrx_peer,
 				  qdf_nbuf_t nbuf_head,
 				  qdf_nbuf_t nbuf_tail)
 {
-	if (dp_rx_validate_rx_callbacks(soc, vdev, peer, nbuf_head) !=
+	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
 					QDF_STATUS_SUCCESS)
 		return QDF_STATUS_E_FAILURE;
 
 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
-				&nbuf_tail, peer->mac_addr.raw);
+					 &nbuf_tail);
 	}
 
-	dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
+	dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -1929,11 +1950,11 @@ QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
 					struct dp_vdev *vdev,
-					struct dp_peer *peer,
+					struct dp_txrx_peer *txrx_peer,
 					qdf_nbuf_t nbuf_head,
 					qdf_nbuf_t nbuf_tail)
 {
-	if (dp_rx_validate_rx_callbacks(soc, vdev, peer, nbuf_head) !=
+	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
 					QDF_STATUS_SUCCESS)
 		return QDF_STATUS_E_FAILURE;
 
@@ -2088,7 +2109,7 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
 #ifndef WDS_VENDOR_EXTENSION
 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
 			   struct dp_vdev *vdev,
-			   struct dp_peer *peer)
+			   struct dp_txrx_peer *txrx_peer)
 {
 	return 1;
 }
@@ -2838,7 +2859,8 @@ dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
 }
 
 #ifdef DP_RX_SPECIAL_FRAME_NEED
-bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
+bool dp_rx_deliver_special_frame(struct dp_soc *soc,
+				 struct dp_txrx_peer *txrx_peer,
 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
 				 uint8_t *rx_tlv_hdr)
 {
@@ -2865,7 +2887,7 @@ bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
 		dp_info("special frame, mpdu sn 0x%x",
 			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
 		qdf_nbuf_set_exc_frame(nbuf, 1);
-		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
+		dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
 				       nbuf, NULL);
 		return true;
 	}

+ 71 - 47
dp/wifi3.0/dp_rx.h

@@ -236,7 +236,7 @@ bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
  *
  * return: true - nbuf has been delivered to stack, false - not.
  */
-bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
+bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
 				 uint8_t *rx_tlv_hdr);
 #else
@@ -247,7 +247,7 @@ bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
 }
 
 static inline
-bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
+bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
 				 uint8_t *rx_tlv_hdr)
 {
@@ -258,18 +258,20 @@ bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
 static inline
-bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
+bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
+				 qdf_nbuf_t nbuf)
 {
-	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
+	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
-		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
+		DP_STATS_INC(ta_txrx_peer, rx.intra_bss.mdns_no_fwd, 1);
 		return false;
 	}
 		return true;
 }
 #else
 static inline
-bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
+bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
+				 qdf_nbuf_t nbuf)
 {
 	return true;
 }
@@ -788,7 +790,7 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
 			  struct rx_desc_pool *rx_desc_pool);
 
 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
-				struct dp_peer *peer);
+				struct dp_txrx_peer *peer);
 
 #ifdef RX_DESC_LOGGING
 /*
@@ -899,7 +901,7 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
 		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
-			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
+			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
 		       uint16_t peer_id, uint8_t tid);
 
@@ -1116,7 +1118,7 @@ static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
  * dp_rx_intrabss_eapol_drop_check() - API For EAPOL
  *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
  * @soc: core txrx main context
- * @ta_peer: source peer entry
+ * @ta_txrx_peer: source peer entry
  * @rx_tlv_hdr: start address of rx tlvs
  * @nbuf: nbuf that has to be intrabss forwarded
  *
@@ -1124,12 +1126,14 @@ static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
  */
 static inline
 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
-				     struct dp_peer *ta_peer,
+				     struct dp_txrx_peer *ta_txrx_peer,
 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
 {
 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
-			 !(dp_nbuf_dst_addr_is_self_addr(ta_peer->vdev, nbuf) ||
-			   dp_nbuf_dst_addr_is_mld_addr(ta_peer->vdev, nbuf)))) {
+			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
+							 nbuf) ||
+			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
+							nbuf)))) {
 		qdf_nbuf_free(nbuf);
 		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
 		return true;
@@ -1141,18 +1145,20 @@ bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
 
 static inline
 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
-				     struct dp_peer *ta_peer,
+				     struct dp_txrx_peer *ta_txrx_peer,
 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
 {
 	return false;
 }
 #endif /* DISABLE_EAPOL_INTRABSS_FWD */
 
-bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
+			     struct dp_txrx_peer *ta_txrx_peer,
 			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
 			     struct cdp_tid_rx_stats *tid_stats);
 
-bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
+			      struct dp_txrx_peer *ta_txrx_peer,
 			      uint8_t tx_vdev_id,
 			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
 			      struct cdp_tid_rx_stats *tid_stats);
@@ -1191,7 +1197,7 @@ static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
 
 #ifndef FEATURE_WDS
 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
-		    struct dp_peer *ta_peer, qdf_nbuf_t nbuf);
+		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);
 
 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
 {
@@ -1201,7 +1207,7 @@ static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_id
 static inline void
 dp_rx_wds_srcport_learn(struct dp_soc *soc,
 			uint8_t *rx_tlv_hdr,
-			struct dp_peer *ta_peer,
+			struct dp_txrx_peer *txrx_peer,
 			qdf_nbuf_t nbuf,
 			struct hal_rx_msdu_metadata msdu_metadata)
 {
@@ -1490,7 +1496,7 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 					uint8_t *rx_tlv_hdr);
 
 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
-			   struct dp_peer *peer);
+			   struct dp_txrx_peer *peer);
 
 /*
  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
@@ -1635,17 +1641,18 @@ void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
 				bool is_mon_dest_desc);
 
 void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
-			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
+			     uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
 			     uint8_t err_code, uint8_t mac_id);
 
 #ifndef QCA_MULTIPASS_SUPPORT
 static inline
-bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
+bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
+			     uint8_t tid)
 {
 	return false;
 }
 #else
-bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
+bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
 			     uint8_t tid);
 #endif
 
@@ -1668,7 +1675,7 @@ QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
  * Caller to hold peer refcount and check for valid peer
  * @soc: soc
  * @vdev: vdev
- * @peer: peer
+ * @txrx_peer: txrx peer
  * @nbuf_head: skb list head
  * @nbuf_tail: skb list tail
  *
@@ -1676,7 +1683,7 @@ QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
  */
 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
 				  struct dp_vdev *vdev,
-				  struct dp_peer *peer,
+				  struct dp_txrx_peer *peer,
 				  qdf_nbuf_t nbuf_head,
 				  qdf_nbuf_t nbuf_tail);
 
@@ -1694,7 +1701,7 @@ QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
  */
 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
 					struct dp_vdev *vdev,
-					struct dp_peer *peer,
+					struct dp_txrx_peer *peer,
 					qdf_nbuf_t nbuf_head,
 					qdf_nbuf_t nbuf_tail);
 #endif
@@ -1866,12 +1873,12 @@ dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
  *
  */
 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
-			    struct dp_peer *peer,
+			    struct dp_txrx_peer *peer,
 			    uint8_t *rx_tlv_hdr,
 			    qdf_nbuf_t nbuf);
 #else
 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
-					  struct dp_peer *peer,
+					  struct dp_txrx_peer *peer,
 					  uint8_t *rx_tlv_hdr,
 					  qdf_nbuf_t nbuf)
 {
@@ -2055,7 +2062,7 @@ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
 			   and 3-address frames
  * @nbuf_head: skb list head
  * @vdev: vdev
- * @peer: peer
+ * @txrx_peer: txrx peer
  * @peer_id: peer id of new received frame
  * @vdev_id: vdev_id of new received frame
  *
@@ -2064,11 +2071,11 @@ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
 static inline bool
 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
 		    struct dp_vdev *vdev,
-		    struct dp_peer *peer,
+		    struct dp_txrx_peer *txrx_peer,
 		    uint16_t peer_id,
 		    uint8_t vdev_id)
 {
-	if (nbuf_head && peer && (peer->peer_id != peer_id))
+	if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
 		return true;
 
 	return false;
@@ -2077,7 +2084,7 @@ dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
 static inline bool
 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
 		    struct dp_vdev *vdev,
-		    struct dp_peer *peer,
+		    struct dp_txrx_peer *txrx_peer,
 		    uint16_t peer_id,
 		    uint8_t vdev_id)
 {
@@ -2382,22 +2389,39 @@ void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
 }
 #endif
 
-static inline
-struct dp_peer *dp_rx_get_peer_and_vdev(struct dp_soc *soc,
-					qdf_nbuf_t nbuf,
-					uint16_t peer_id,
-					bool pkt_capture_offload,
-					struct dp_vdev **vdev,
-					struct dp_pdev **rx_pdev,
-					uint32_t *dsf,
-					uint32_t *old_tid)
-{
-	struct dp_peer *peer = NULL;
-
-	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX);
-
-	if (qdf_likely(peer)) {
-		*vdev = peer->vdev;
+/**
+ * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
+ * @soc: core txrx main context
+ * @nbuf: pointer to the first msdu of an amsdu
+ * @peer_id: peer id of the peer
+ * @txrx_ref_handle: buffer to save the handle for the txrx peer's reference
+ * @pkt_capture_offload: flag indicating if pkt capture offload is needed
+ * @vdev: buffer to hold pointer to vdev
+ * @rx_pdev: buffer to hold pointer to rx pdev
+ * @dsf: delay stats flag
+ * @old_tid: old tid
+ *
+ * Get txrx peer and vdev from peer id
+ *
+ * Return: Pointer to txrx peer
+ */
+static inline struct dp_txrx_peer *
+dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
+			     qdf_nbuf_t nbuf,
+			     uint16_t peer_id,
+			     dp_txrx_ref_handle *txrx_ref_handle,
+			     bool pkt_capture_offload,
+			     struct dp_vdev **vdev,
+			     struct dp_pdev **rx_pdev,
+			     uint32_t *dsf,
+			     uint32_t *old_tid)
+{
+	struct dp_txrx_peer *txrx_peer = NULL;
+
+	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
+					       DP_MOD_ID_RX);
+
+	if (qdf_likely(txrx_peer)) {
+		*vdev = txrx_peer->vdev;
 	} else {
 		nbuf->next = NULL;
 		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
@@ -2419,7 +2443,7 @@ struct dp_peer *dp_rx_get_peer_and_vdev(struct dp_soc *soc,
 	*old_tid = 0xff;
 
 end:
-	return peer;
+	return txrx_peer;
 }
 
 static inline QDF_STATUS

+ 3 - 3
dp/wifi3.0/dp_rx_err.c

@@ -1788,7 +1788,7 @@ fail:
 	defined(WLAN_MCAST_MLO)
 static bool dp_rx_igmp_handler(struct dp_soc *soc,
 			       struct dp_vdev *vdev,
-			       struct dp_peer *peer,
+			       struct dp_txrx_peer *peer,
 			       qdf_nbuf_t nbuf)
 {
 	if (soc->arch_ops.dp_rx_mcast_handler) {
@@ -1800,7 +1800,7 @@ static bool dp_rx_igmp_handler(struct dp_soc *soc,
 #else
 static bool dp_rx_igmp_handler(struct dp_soc *soc,
 			       struct dp_vdev *vdev,
-			       struct dp_peer *peer,
+			       struct dp_txrx_peer *peer,
 			       qdf_nbuf_t nbuf)
 {
 	return false;
@@ -1879,7 +1879,7 @@ dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
 				   soc->rx_pkt_tlv_size));
 
-	if (dp_rx_igmp_handler(soc, vdev, peer, nbuf))
+	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf))
 		return;
 
 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
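dp_rx_igmp_handler() forwards through the per-arch ops table; with the dp_types.h change below, the rx mcast hook takes the txrx peer directly. The dispatch, reconstructed as a sketch (the elided body is assumed to simply return the handler's verdict):

static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf)
{
	if (soc->arch_ops.dp_rx_mcast_handler)
		return soc->arch_ops.dp_rx_mcast_handler(soc, vdev,
							 peer, nbuf);
	return false;
}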

+ 57 - 35
dp/wifi3.0/dp_tx.c

@@ -3120,6 +3120,7 @@ void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
 	uint16_t sa_peer_id = DP_INVALID_PEER;
 	struct dp_ast_entry *ast_entry = NULL;
 	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
+	struct dp_txrx_peer *txrx_peer;
 
 	if (!soc->ast_offload_support) {
 		if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
@@ -3140,7 +3141,10 @@ void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
 
 	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-		if (!peer->bss_peer && peer->nawds_enabled) {
+		txrx_peer = dp_get_txrx_peer(peer);
+		qdf_assert_always(txrx_peer);
+
+		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
 			peer_id = peer->peer_id;
 			/* Multicast packets need to be
 			 * dropped in case of intra bss forwarding
@@ -3481,6 +3485,7 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
 	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
 	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
 #endif
+	struct dp_txrx_peer *txrx_peer;
 
 	qdf_assert(vdev);
 
@@ -3511,7 +3516,10 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
 
 	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-		if (peer->bss_peer)
+		txrx_peer = dp_get_txrx_peer(peer);
+		qdf_assert_always(txrx_peer);
+
+		if (txrx_peer->bss_peer)
 			continue;
 
 		/* Detect wds peers that use 3-addr framing for mcast.
@@ -3520,7 +3528,8 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
 		 * peers that use 4-addr framing for mcast frames will
 		 * be duplicated and sent as 4-addr frames below.
 		 */
-		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
+		if (!txrx_peer->wds_enabled ||
+		    !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
 			num_peers_3addr = 1;
 			break;
 		}
@@ -3533,7 +3542,10 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
 	} else {
 		qdf_spin_lock_bh(&vdev->peer_list_lock);
 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-			if ((peer->peer_id != HTT_INVALID_PEER) &&
+			txrx_peer = dp_get_txrx_peer(peer);
+			qdf_assert_always(txrx_peer);
+
+			if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
 #ifdef WDS_VENDOR_EXTENSION
 			/*
 			 * . if 3-addr STA, then send on BSS Peer
@@ -3542,12 +3554,13 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
 			 * . if Peer WDS enabled and accept 4-addr ucast,
 			 * send ucast on that peer only
 			 */
-			((peer->bss_peer && num_peers_3addr && is_mcast) ||
-			 (peer->wds_enabled &&
-				  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
-				   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
+			((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
+			 (txrx_peer->wds_enabled &&
+			 ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
+			 (is_ucast &&
+			 txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
 #else
-			(peer->bss_peer &&
+			(txrx_peer->bss_peer &&
 			 (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
 #endif
 				peer_id = DP_INVALID_PEER;
@@ -3775,12 +3788,12 @@ void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
 
 #ifdef DISABLE_DP_STATS
 static
-inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
+inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *peer)
 {
 }
 #else
-static
-inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
+static inline void
+dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
 {
 	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
 
@@ -4116,9 +4129,10 @@ void
 dp_tx_comp_process_desc(struct dp_soc *soc,
 			struct dp_tx_desc_s *desc,
 			struct hal_tx_completion_status *ts,
-			struct dp_peer *peer)
+			struct dp_txrx_peer *txrx_peer)
 {
 	uint64_t time_latency = 0;
+	uint16_t peer_id = DP_INVALID_PEER_ID;
 
 	/*
 	 * m_copy/tx_capture modes are not supported for
@@ -4139,16 +4153,18 @@ dp_tx_comp_process_desc(struct dp_soc *soc,
 
 	if (!(desc->msdu_ext_desc)) {
 		dp_tx_enh_unmap(soc, desc);
+		if (txrx_peer)
+			peer_id = txrx_peer->peer_id;
 
 		if (QDF_STATUS_SUCCESS ==
-		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer)) {
+		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
 			return;
 		}
 
 		if (QDF_STATUS_SUCCESS ==
 		    dp_get_completion_indication_for_stack(soc,
 							   desc->pdev,
-							   peer, ts,
+							   txrx_peer, ts,
 							   desc->nbuf,
 							   time_latency)) {
 			dp_send_completion_to_stack(soc,
@@ -4345,7 +4361,7 @@ void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  * @soc: DP soc handle
  * @tx_desc: software descriptor head pointer
  * @ts: Tx completion status
- * @peer: peer handle
+ * @txrx_peer: txrx peer handle
  * @ring_id: ring number
  *
  * Return: none
@@ -4353,7 +4369,8 @@ void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
 				  struct dp_tx_desc_s *tx_desc,
 				  struct hal_tx_completion_status *ts,
-				  struct dp_peer *peer, uint8_t ring_id)
+				  struct dp_txrx_peer *txrx_peer,
+				  uint8_t ring_id)
 {
 	uint32_t length;
 	qdf_ether_header_t *eh;
@@ -4411,12 +4428,12 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
 	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
 			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
 
-	if (!peer) {
+	if (!txrx_peer) {
 		dp_info_rl("peer is null or deletion in progress");
 		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
 		goto out;
 	}
-	vdev = peer->vdev;
+	vdev = txrx_peer->vdev;
 
 	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
 	dp_tx_update_uplink_delay(soc, vdev, ts);
@@ -4427,12 +4444,13 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
 		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
 
 	/* Update peer level stats */
-	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
+	if (qdf_unlikely(txrx_peer->bss_peer &&
+			 vdev->opmode == wlan_op_mode_ap)) {
 		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
 			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
 
-			if ((peer->vdev->tx_encap_type ==
-				htt_cmn_pkt_type_ethernet) &&
+			if (txrx_peer->vdev->tx_encap_type ==
+				htt_cmn_pkt_type_ethernet &&
 				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
 				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
 			}
@@ -4441,7 +4459,7 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
 		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
 		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
 			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
-			if (qdf_unlikely(peer->in_twt)) {
+			if (qdf_unlikely(txrx_peer->in_twt)) {
 				DP_STATS_INC_PKT(peer,
 						 tx.tx_success_twt,
 						 1, length);
@@ -4551,9 +4569,10 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
 	struct dp_tx_desc_s *desc;
 	struct dp_tx_desc_s *next;
 	struct hal_tx_completion_status ts;
-	struct dp_peer *peer = NULL;
+	struct dp_txrx_peer *txrx_peer = NULL;
 	uint16_t peer_id = DP_INVALID_PEER;
 	qdf_nbuf_t netbuf;
+	dp_txrx_ref_handle txrx_ref_handle;
 
 	desc = comp_head;
 
@@ -4562,12 +4581,14 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
 		dp_tx_prefetch_next_nbuf_data(next);
 
 		if (peer_id != desc->peer_id) {
-			if (peer)
-				dp_peer_unref_delete(peer,
-						     DP_MOD_ID_TX_COMP);
+			if (txrx_peer)
+				dp_txrx_peer_unref_delete(txrx_ref_handle,
+							  DP_MOD_ID_TX_COMP);
 			peer_id = desc->peer_id;
-			peer = dp_peer_get_ref_by_id(soc, peer_id,
-						     DP_MOD_ID_TX_COMP);
+			txrx_peer =
+				dp_txrx_peer_get_ref_by_id(soc, peer_id,
+							   &txrx_ref_handle,
+							   DP_MOD_ID_TX_COMP);
 		}
 
 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
@@ -4595,21 +4616,22 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
 		}
 		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
 
-		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
+		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
+					     ring_id);
 
 		netbuf = desc->nbuf;
 		/* check tx complete notification */
-		if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
-			dp_tx_notify_completion(soc, peer->vdev, desc,
+		if (txrx_peer && qdf_nbuf_tx_notify_comp_get(netbuf))
+			dp_tx_notify_completion(soc, txrx_peer->vdev, desc,
 						netbuf, ts.status);
 
-		dp_tx_comp_process_desc(soc, desc, &ts, peer);
+		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
 
 		dp_tx_desc_release(desc, desc->pool_id);
 		desc = next;
 	}
-	if (peer)
-		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
+	if (txrx_peer)
+		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
 }
 
 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
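The completion loop above acquires the txrx peer once per run of descriptors sharing a peer_id instead of once per descriptor, mirroring the rx-side idiom but under DP_MOD_ID_TX_COMP. Condensed (sketch):

	uint16_t peer_id = DP_INVALID_PEER;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	for (desc = comp_head; desc; desc = desc->next) {
		if (peer_id != desc->peer_id) {
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_TX_COMP);
			peer_id = desc->peer_id;
			txrx_peer = dp_txrx_peer_get_ref_by_id(
					soc, peer_id, &txrx_ref_handle,
					DP_MOD_ID_TX_COMP);
		}
		/* ... per-descriptor completion processing; txrx_peer
		 * may legitimately be NULL if the peer is gone ...
		 */
	}

	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);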

+ 5 - 4
dp/wifi3.0/dp_tx.h

@@ -247,11 +247,12 @@ void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
 				  struct dp_tx_desc_s *tx_desc,
 				  struct hal_tx_completion_status *ts,
-				  struct dp_peer *peer, uint8_t ring_id);
+				  struct dp_txrx_peer *txrx_peer,
+				  uint8_t ring_id);
 void dp_tx_comp_process_desc(struct dp_soc *soc,
 			     struct dp_tx_desc_s *desc,
 			     struct hal_tx_completion_status *ts,
-			     struct dp_peer *peer);
+			     struct dp_txrx_peer *txrx_peer);
 void dp_tx_reinject_handler(struct dp_soc *soc,
 			    struct dp_vdev *vdev,
 			    struct dp_tx_desc_s *tx_desc,
@@ -715,7 +716,7 @@ static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
 QDF_STATUS
 dp_get_completion_indication_for_stack(struct dp_soc *soc,
 				       struct dp_pdev *pdev,
-				       struct dp_peer *peer,
+				       struct dp_txrx_peer *peer,
 				       struct hal_tx_completion_status *ts,
 				       qdf_nbuf_t netbuf,
 				       uint64_t time_latency);
@@ -727,7 +728,7 @@ void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
 static inline
 QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
 				       struct dp_pdev *pdev,
-				       struct dp_peer *peer,
+				       struct dp_txrx_peer *peer,
 				       struct hal_tx_completion_status *ts,
 				       qdf_nbuf_t netbuf,
 				       uint64_t time_latency)

+ 1 - 1
dp/wifi3.0/dp_types.h

@@ -1754,7 +1754,7 @@ struct dp_arch_ops {
 	void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
 				    qdf_nbuf_t nbuf);
 	bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
-				    struct dp_peer *peer, qdf_nbuf_t nbuf);
+				    struct dp_txrx_peer *peer, qdf_nbuf_t nbuf);
 #endif
 	void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
 	QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
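Each arch wires its own handler into dp_arch_ops; dp_rx_mlo_igmp_handler() above already matches the new signature, so a registration would look like this (the registration site is assumed for illustration):

	/* e.g. in the be arch-ops attach path (location assumed) */
	arch_ops->dp_rx_mcast_handler = dp_rx_mlo_igmp_handler;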

+ 67 - 57
dp/wifi3.0/li/dp_li_rx.c

@@ -67,15 +67,15 @@ bool is_sa_da_idx_valid(uint32_t max_ast,
  * Return: bool (true if it is a looped back pkt else false)
  */
 static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
-					   struct dp_peer *peer,
+					   struct dp_txrx_peer *txrx_peer,
 					   uint8_t *rx_tlv_hdr,
 					   qdf_nbuf_t nbuf)
 {
-	return dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf);
+	return dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf);
 }
 #else
 static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
-					   struct dp_peer *peer,
+					   struct dp_txrx_peer *txrx_peer,
 					   uint8_t *rx_tlv_hdr,
 					   qdf_nbuf_t nbuf)
 {
@@ -87,13 +87,14 @@ static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
 static bool
 dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
-			      struct dp_peer *ta_peer,
+			      struct dp_txrx_peer *ta_txrx_peer,
 			      struct hal_rx_msdu_metadata *msdu_metadata,
 			      uint8_t *p_tx_vdev_id)
 {
 	uint16_t da_peer_id;
-	struct dp_peer *da_peer;
+	struct dp_txrx_peer *da_peer;
 	struct dp_ast_entry *ast_entry;
+	dp_txrx_ref_handle txrx_ref_handle;
 
 	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
 		return false;
@@ -112,12 +113,12 @@ dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
 	 * this indicates a change in topology and that AST entries
 	 * are yet to be updated.
 	 */
-	if ((da_peer_id == ta_peer->peer_id) ||
-	    (da_peer_id == HTT_INVALID_PEER))
+	if (da_peer_id == ta_txrx_peer->peer_id ||
+	    da_peer_id == HTT_INVALID_PEER)
 		return false;
 
-	da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
-					DP_MOD_ID_RX);
+	da_peer = dp_txrx_peer_get_ref_by_id(soc, da_peer_id,
+					     &txrx_ref_handle, DP_MOD_ID_RX);
 	if (!da_peer)
 		return false;
 
@@ -125,19 +126,19 @@ dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
 	/* If the source or destination peer is in the isolation
 	 * list then don't forward; instead push to bridge stack.
 	 */
-	if (dp_get_peer_isolation(ta_peer) ||
+	if (dp_get_peer_isolation(ta_txrx_peer) ||
 	    dp_get_peer_isolation(da_peer) ||
-	    (da_peer->vdev->vdev_id != ta_peer->vdev->vdev_id)) {
-		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+	    da_peer->vdev->vdev_id != ta_txrx_peer->vdev->vdev_id) {
+		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 		return false;
 	}
 
 	if (da_peer->bss_peer) {
-		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 		return false;
 	}
 
-	dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 	return true;
 }
 
@@ -145,7 +146,7 @@ dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
  * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
  *
  * @soc: core txrx main context
- * @ta_peer	: source peer entry
+ * @ta_txrx_peer	: source peer entry
  * @rx_tlv_hdr	: start address of rx tlvs
  * @nbuf	: nbuf that has to be intrabss forwarded
  *
@@ -153,7 +154,7 @@ dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
  */
 static bool
 dp_rx_intrabss_fwd_li(struct dp_soc *soc,
-		      struct dp_peer *ta_peer,
+		      struct dp_txrx_peer *ta_txrx_peer,
 		      uint8_t *rx_tlv_hdr,
 		      qdf_nbuf_t nbuf,
 		      struct hal_rx_msdu_metadata msdu_metadata,
@@ -169,17 +170,17 @@ dp_rx_intrabss_fwd_li(struct dp_soc *soc,
 	 * like igmpsnoop decide whether to forward or not with
 	 * Mcast enhancement.
 	 */
-	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer)
-		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
+	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
+		return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
 					       nbuf, tid_stats);
 
-	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
+	if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
 					    nbuf))
 		return true;
 
-	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_peer,
+	if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
 					  &msdu_metadata, &tx_vdev_id))
-		return dp_rx_intrabss_ucast_fwd(soc, ta_peer, tx_vdev_id,
+		return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
 						rx_tlv_hdr, nbuf, tid_stats);
 
 	return false;
@@ -217,7 +218,8 @@ uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
 	uint16_t msdu_len = 0;
 	uint16_t peer_id;
 	uint8_t vdev_id;
-	struct dp_peer *peer;
+	struct dp_txrx_peer *txrx_peer;
+	dp_txrx_ref_handle txrx_ref_handle;
 	struct dp_vdev *vdev;
 	uint32_t pkt_len = 0;
 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
@@ -272,7 +274,7 @@ more_data:
 	nbuf_tail = NULL;
 	deliver_list_head = NULL;
 	deliver_list_tail = NULL;
-	peer = NULL;
+	txrx_peer = NULL;
 	vdev = NULL;
 	num_rx_bufs_reaped = 0;
 	ebuf_head = NULL;
@@ -560,7 +562,7 @@ done:
 
 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
 	/* Peer can be NULL in case of LFR */
-	if (qdf_likely(peer))
+	if (qdf_likely(txrx_peer))
 		vdev = NULL;
 
 	/*
@@ -585,9 +587,9 @@ done:
 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
 
-		if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
+		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
 					peer_id, vdev_id)) {
-			dp_rx_deliver_to_stack(soc, vdev, peer,
+			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
 					       deliver_list_head,
 					       deliver_list_tail);
 			deliver_list_head = NULL;
@@ -605,31 +607,36 @@ done:
 			}
 		}
 
-		if (qdf_unlikely(!peer)) {
-			peer = dp_rx_get_peer_and_vdev(soc, nbuf, peer_id,
-						       pkt_capture_offload,
-						       &vdev,
-						       &rx_pdev, &dsf,
-						       &old_tid);
-			if (qdf_unlikely(!peer) || qdf_unlikely(!vdev)) {
+		if (qdf_unlikely(!txrx_peer)) {
+			txrx_peer =
+			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
+						     &txrx_ref_handle,
+						     pkt_capture_offload,
+						     &vdev,
+						     &rx_pdev, &dsf,
+						     &old_tid);
+			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
 				nbuf = next;
 				continue;
 			}
-		} else if (peer && peer->peer_id != peer_id) {
-			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
-
-			peer = dp_rx_get_peer_and_vdev(soc, nbuf, peer_id,
-						       pkt_capture_offload,
-						       &vdev,
-						       &rx_pdev, &dsf,
-						       &old_tid);
-			if (qdf_unlikely(!peer) || qdf_unlikely(!vdev)) {
+		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
+			dp_txrx_peer_unref_delete(txrx_ref_handle,
+						  DP_MOD_ID_RX);
+
+			txrx_peer =
+			dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
+						     &txrx_ref_handle,
+						     pkt_capture_offload,
+						     &vdev,
+						     &rx_pdev, &dsf,
+						     &old_tid);
+			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
 				nbuf = next;
 				continue;
 			}
 		}
 
-		if (peer) {
+		if (txrx_peer) {
 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
 			qdf_dp_trace_set_track(nbuf, QDF_RX);
 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
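
Distilled, the hunk above caches one txrx peer reference across consecutive MSDUs and swaps it only when the peer_id changes (sketch: names are from the change, the loop scaffolding is illustrative):

	txrx_peer = NULL;
	while (nbuf) {
		next = nbuf->next;
		peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

		/* release the cached reference once the peer_id changes */
		if (txrx_peer && txrx_peer->peer_id != peer_id) {
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX);
			txrx_peer = NULL;
		}

		if (!txrx_peer)
			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
						peer_id, &txrx_ref_handle,
						pkt_capture_offload, &vdev,
						&rx_pdev, &dsf, &old_tid);

		/* per-MSDU processing elided */
		nbuf = next;
	}

	/* release whatever reference is still held at loop exit */
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);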
@@ -756,7 +763,8 @@ done:
 		 * process frame for multipass phase processing
 		 */
 		if (qdf_unlikely(vdev->multipass_en)) {
-			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
+			if (dp_rx_multipass_process(txrx_peer, nbuf,
+						    tid) == false) {
-				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
+				DP_STATS_INC(txrx_peer, rx.multipass_rx_pkt_drop, 1);
 				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
@@ -764,7 +772,7 @@ done:
 			}
 		}
 
-		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
+		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
 			dp_rx_err("%pK: Policy Check Drop pkt", soc);
-			DP_STATS_INC(peer, rx.policy_check_drop, 1);
+			DP_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
@@ -775,7 +783,7 @@ done:
 			continue;
 		}
 
-		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
+		if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
 								rx_tlv_hdr) ==
@@ -790,7 +798,8 @@ done:
 		/*
 		 * Drop non-EAPOL frames from unauthorized peer.
 		 */
-		if (qdf_likely(peer) && qdf_unlikely(!peer->authorize) &&
+		if (qdf_likely(txrx_peer) &&
+		    qdf_unlikely(!txrx_peer->authorize) &&
 		    !qdf_nbuf_is_raw_frame(nbuf)) {
 			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
 					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
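
The authorization gate above, completed for readability (sketch; example_drop_nbuf() is a hypothetical stand-in for the drop path the hunk truncates):

	if (qdf_likely(txrx_peer) && qdf_unlikely(!txrx_peer->authorize) &&
	    !qdf_nbuf_is_raw_frame(nbuf)) {
		bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
				qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

		/* only key-exchange frames may pass before authorization */
		if (!is_eapol) {
			example_drop_nbuf(soc, nbuf);	/* hypothetical */
			nbuf = next;
			continue;
		}
	}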
@@ -814,7 +823,7 @@ done:
 		/* Update the flow tag in SKB based on FSE metadata */
 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
 
-		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
+		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
 					reo_ring_num, tid_stats);
 
 		if (qdf_unlikely(vdev->mesh_vdev)) {
@@ -853,7 +862,7 @@ done:
 				continue;
 			}
 			if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
-								 peer,
+								 txrx_peer,
 								 rx_tlv_hdr,
 								 nbuf))) {
 				/* this is a looped back MCBC pkt,drop it */
@@ -867,13 +876,14 @@ done:
 			if (qdf_likely(vdev->wds_enabled))
 				dp_rx_wds_srcport_learn(soc,
 							rx_tlv_hdr,
-							peer,
+							txrx_peer,
 							nbuf,
 							msdu_metadata);
 
 			/* Intrabss-fwd */
 			if (dp_rx_check_ap_bridge(vdev))
-				if (dp_rx_intrabss_fwd_li(soc, peer, rx_tlv_hdr,
+				if (dp_rx_intrabss_fwd_li(soc, txrx_peer,
+							  rx_tlv_hdr,
 							  nbuf,
 							  msdu_metadata,
 							  tid_stats)) {
@@ -887,7 +897,7 @@ done:
 
 		dp_rx_update_stats(soc, nbuf);
 
-		dp_pkt_add_timestamp(peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
+		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
 				     current_time, nbuf);
 
 		DP_RX_LIST_APPEND(deliver_list_head,
@@ -895,7 +905,7 @@ done:
 				  nbuf);
-		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
-				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
+		DP_STATS_INC_PKT(txrx_peer, rx.to_stack, 1,
+				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
-		if (qdf_unlikely(peer->in_twt))
+		if (qdf_unlikely(txrx_peer->in_twt))
-			DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
-					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
+			DP_STATS_INC_PKT(txrx_peer, rx.to_stack_twt, 1,
+					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
 
@@ -904,12 +914,12 @@ done:
 	}
 
 	if (qdf_likely(deliver_list_head)) {
-		if (qdf_likely(peer)) {
+		if (qdf_likely(txrx_peer)) {
 			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
 						     pkt_capture_offload,
 						     deliver_list_head);
 			if (!pkt_capture_offload)
-				dp_rx_deliver_to_stack(soc, vdev, peer,
+				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
 						       deliver_list_head,
 						       deliver_list_tail);
 		} else {
@@ -923,8 +933,8 @@ done:
 		}
 	}
 
-	if (qdf_likely(peer))
-		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
+	if (qdf_likely(txrx_peer))
+		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 
 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
 		if (quota) {

+ 14 - 10
dp/wifi3.0/li/dp_li_tx.c

@@ -94,7 +94,8 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
 	struct dp_vdev *vdev = NULL;
 	struct hal_tx_completion_status ts = {0};
 	uint32_t *htt_desc = (uint32_t *)status;
-	struct dp_peer *peer;
+	struct dp_txrx_peer *txrx_peer;
+	dp_txrx_ref_handle txrx_ref_handle;
 	struct cdp_tid_tx_stats *tid_stats = NULL;
 	struct htt_soc *htt_handle;
 	uint8_t vdev_id;
@@ -188,8 +189,9 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
 		if (tx_status < CDP_MAX_TX_HTT_STATUS)
 			tid_stats->htt_status_cnt[tx_status]++;
 
-		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
-					     DP_MOD_ID_HTT_COMP);
+		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
+						       &txrx_ref_handle,
+						       DP_MOD_ID_HTT_COMP);
-		if (qdf_likely(peer)) {
-			DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
-					 qdf_nbuf_len(tx_desc->nbuf));
+		if (qdf_likely(txrx_peer)) {
+			DP_STATS_INC_PKT(txrx_peer, tx.comp_pkt, 1,
+					 qdf_nbuf_len(tx_desc->nbuf));
@@ -197,12 +199,14 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
 				      tx_status != HTT_TX_FW2WBM_TX_STATUS_OK);
 		}
 
-		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
-		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
+		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
+					     ring_id);
+		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
 		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
 
-		if (qdf_likely(peer))
-			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
+		if (qdf_likely(txrx_peer))
+			dp_txrx_peer_unref_delete(txrx_ref_handle,
+						  DP_MOD_ID_HTT_COMP);
 
 		break;
 	}
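
Taken together, the completion path after this change follows one lifecycle (sketch condensed from the hunks above; error handling elided):

	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
					       &txrx_ref_handle,
					       DP_MOD_ID_HTT_COMP);
	/* txrx_peer may legitimately be NULL, e.g. peer already deleted */
	if (qdf_likely(txrx_peer))
		DP_STATS_INC_PKT(txrx_peer, tx.comp_pkt, 1,
				 qdf_nbuf_len(tx_desc->nbuf));

	/* status/desc processing tolerates a NULL peer */
	dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer, ring_id);
	dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);

	if (qdf_likely(txrx_peer))
		dp_txrx_peer_unref_delete(txrx_ref_handle,
					  DP_MOD_ID_HTT_COMP);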