Explorar o código

qcacmn: Optimize branch predictions in monitor path

Optimize branch predictions in monitor path

CRs-Fixed: 3265307
Change-Id: I7536a2fb94b350a678c123d7e0cfcd187816c96d
Amir Patel hai 3 anos
pai
achega
4720a30038

+ 28 - 27
dp/wifi3.0/monitor/1.0/dp_rx_mon_status_1.0.c

@@ -436,7 +436,7 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
 	struct dp_mon_pdev *mon_pdev;
 
-	if (!pdev) {
+	if (qdf_unlikely(!pdev)) {
 		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d", soc,
 				       mac_id);
 		return;
@@ -446,7 +446,7 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 	ppdu_info = &mon_pdev->ppdu_info;
 	rx_mon_stats = &mon_pdev->rx_mon_stats;
 
-	if (mon_pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
+	if (qdf_unlikely(mon_pdev->mon_ppdu_status != DP_PPDU_STATUS_START))
 		return;
 
 	rx_enh_capture_mode = mon_pdev->rx_enh_capture_mode;
@@ -458,7 +458,7 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 						   DP_MON_STATUS_BUF_DEQUEUE,
 						   NULL, NULL, status_nbuf);
 
-		if (!status_nbuf)
+		if (qdf_unlikely(!status_nbuf))
 			return;
 
 		rx_tlv = qdf_nbuf_data(status_nbuf);
@@ -488,7 +488,7 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv,
 						mon_pdev->is_tlv_hdr_64_bit);
 
-				if ((rx_tlv - rx_tlv_start) >=
-					RX_MON_STATUS_BUF_SIZE)
+				if (qdf_unlikely((rx_tlv - rx_tlv_start) >=
+						 RX_MON_STATUS_BUF_SIZE))
 					break;
 
@@ -498,17 +498,17 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 				 (tlv_status == HAL_TLV_STATUS_MPDU_START) ||
 				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
 		}
-		if (mon_pdev->dp_peer_based_pktlog) {
+		if (qdf_unlikely(mon_pdev->dp_peer_based_pktlog)) {
 			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
 							status_nbuf,
 							pdev->pdev_id);
 		} else {
-			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
+			if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL))
 				pktlog_mode = WDI_EVENT_RX_DESC;
-			else if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
+			else if (qdf_unlikely(mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE))
 				pktlog_mode = WDI_EVENT_LITE_RX;
 
-			if (pktlog_mode != WDI_NO_VAL)
+			if (qdf_unlikely(pktlog_mode != WDI_NO_VAL))
 				dp_wdi_event_handler(pktlog_mode, soc,
 						     status_nbuf,
 						     HTT_INVALID_PEER,
@@ -516,9 +516,9 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 		}
 
 		/* smart monitor vap and m_copy cannot co-exist */
-		if (ppdu_info->rx_status.monitor_direct_used &&
-		    mon_pdev->neighbour_peers_added &&
-		    mon_pdev->mvdev) {
+		if (qdf_unlikely(ppdu_info->rx_status.monitor_direct_used &&
+				 mon_pdev->neighbour_peers_added &&
+				 mon_pdev->mvdev)) {
 			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
 						pdev, ppdu_info, status_nbuf);
 			if (smart_mesh_status)
@@ -527,7 +527,7 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 			dp_rx_process_mcopy_mode(soc, pdev,
 						 ppdu_info, tlv_status,
 						 status_nbuf);
-		} else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
+		} else if (qdf_unlikely(rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
 			if (!nbuf_used)
 				qdf_nbuf_free(status_nbuf);
 
@@ -539,21 +539,21 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 			qdf_nbuf_free(status_nbuf);
 		}
 
-		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
+		if (qdf_unlikely(tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE)) {
 			dp_rx_mon_deliver_non_std(soc, mac_id);
-		} else if ((tlv_status == HAL_TLV_STATUS_PPDU_DONE) &&
-				(!dp_rx_mon_check_phyrx_abort(pdev, ppdu_info))) {
+		} else if ((qdf_likely(tlv_status == HAL_TLV_STATUS_PPDU_DONE)) &&
+				(qdf_likely(!dp_rx_mon_check_phyrx_abort(pdev, ppdu_info)))) {
 			rx_mon_stats->status_ppdu_done++;
 			dp_rx_mon_handle_mu_ul_info(ppdu_info);
 
-			if (mon_pdev->tx_capture_enabled
-			    != CDP_TX_ENH_CAPTURE_DISABLED)
+			if (qdf_unlikely(mon_pdev->tx_capture_enabled
+			    != CDP_TX_ENH_CAPTURE_DISABLED))
 				dp_send_ack_frame_to_stack(soc, pdev,
 							   ppdu_info);
 
-			if (mon_pdev->enhanced_stats_en ||
-			    mon_pdev->mcopy_mode ||
-			    mon_pdev->neighbour_peers_added)
+			if (qdf_likely(mon_pdev->enhanced_stats_en ||
+				       mon_pdev->mcopy_mode ||
+				       mon_pdev->neighbour_peers_added))
 				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
 			else if (dp_cfr_rcc_mode_status(pdev))
 				dp_rx_handle_cfr(soc, pdev, ppdu_info);
@@ -561,7 +561,7 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
 
 			/* Collect spcl vap stats if configured */
-			if (mon_pdev->scan_spcl_vap_configured)
+			if (qdf_unlikely(mon_pdev->scan_spcl_vap_configured))
 				dp_rx_mon_update_scan_spcl_vap_stats(pdev,
 								     ppdu_info);
 
@@ -620,7 +620,7 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
 	uint32_t work_done = 0;
 	struct dp_mon_pdev *mon_pdev;
 
-	if (!pdev) {
+	if (qdf_unlikely(!pdev)) {
 		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
 				       soc, mac_id);
 		return work_done;
@@ -631,7 +631,8 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
 	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;
 
 	qdf_assert(mon_status_srng);
-	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
+	if (qdf_unlikely(!mon_status_srng ||
+			 !hal_srng_initialized(mon_status_srng))) {
 
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			"%s %d : HAL Monitor Status Ring Init Failed -- %pK",
@@ -696,7 +697,7 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
 
 			status = hal_get_rx_status_done(status_buf);
 
-			if (status != QDF_STATUS_SUCCESS) {
+			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
 				uint32_t hp, tp;
 				hal_get_sw_hptp(hal_soc, mon_status_srng,
 						&tp, &hp);
@@ -720,9 +721,9 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
 				 */
 				reap_status = dp_rx_mon_handle_status_buf_done(pdev,
 									mon_status_srng);
-				if (reap_status == DP_MON_STATUS_NO_DMA)
+				if (qdf_unlikely(reap_status == DP_MON_STATUS_NO_DMA))
 					continue;
-				else if (reap_status == DP_MON_STATUS_REPLENISH) {
+				else if (qdf_unlikely(reap_status == DP_MON_STATUS_REPLENISH)) {
 					if (!rx_desc->unmapped) {
 						qdf_nbuf_unmap_nbytes_single(
 							soc->osdev, status_nbuf,
@@ -737,7 +738,7 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
 			qdf_nbuf_set_pktlen(status_nbuf,
 					    RX_MON_STATUS_BUF_SIZE);
 
-			if (!rx_desc->unmapped) {
+			if (qdf_likely(!rx_desc->unmapped)) {
 				qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
 							     QDF_DMA_FROM_DEVICE,
 							     rx_desc_pool->buf_size);

+ 8 - 11
dp/wifi3.0/monitor/dp_mon.c

@@ -1278,15 +1278,15 @@ QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
 	struct dp_peer *tgt_peer = NULL;
 	struct dp_txrx_peer *txrx_peer = NULL;
 
-	if (!peer || !peer->vdev || !peer->monitor_peer)
+	if (qdf_unlikely(!peer || !peer->vdev || !peer->monitor_peer))
 		return QDF_STATUS_E_FAULT;
 
 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
-	if (!tgt_peer)
+	if (qdf_unlikely(!tgt_peer))
 		return QDF_STATUS_E_FAULT;
 
 	txrx_peer = tgt_peer->txrx_peer;
-	if (!txrx_peer)
+	if (qdf_unlikely(!txrx_peer))
 		return QDF_STATUS_E_FAULT;
 
 	mon_peer_stats = &peer->monitor_peer->stats;
@@ -1826,13 +1826,12 @@ dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
 QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
 				    struct cdp_rx_stats_ppdu_user *ppdu_user)
 {
-	struct cdp_interface_peer_qos_stats qos_stats_intf;
+	struct cdp_interface_peer_qos_stats qos_stats_intf = {0};
 
-	if (ppdu_user->peer_id == HTT_INVALID_PEER) {
+	if (qdf_unlikely(ppdu_user->peer_id == HTT_INVALID_PEER)) {
 		dp_mon_warn("Invalid peer id");
 		return QDF_STATUS_E_FAILURE;
 	}
-	qdf_mem_zero(&qos_stats_intf, sizeof(qos_stats_intf));
 
 	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
 		     QDF_MAC_ADDR_SIZE);
@@ -2505,20 +2504,18 @@ dp_tx_rate_stats_update(struct dp_peer *peer,
 void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
 			 uint16_t peer_id)
 {
-	struct cdp_interface_peer_stats peer_stats_intf;
+	struct cdp_interface_peer_stats peer_stats_intf = {0};
 	struct dp_mon_peer *mon_peer = peer->monitor_peer;
 	struct dp_txrx_peer *txrx_peer = NULL;
 
-	if (!mon_peer)
+	if (qdf_unlikely(!mon_peer))
 		return;
 
-	qdf_mem_zero(&peer_stats_intf,
-		     sizeof(struct cdp_interface_peer_stats));
 	mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
 	peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;
 
 	txrx_peer = dp_get_txrx_peer(peer);
-	if (txrx_peer) {
+	if (qdf_likely(txrx_peer)) {
 		peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
 		peer_stats_intf.tx_byte_count =
 			txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;

+ 49 - 50
dp/wifi3.0/monitor/dp_rx_mon.c

@@ -146,7 +146,7 @@ dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
 {
 	struct cdp_rx_ppdu_cfr_info *cfr_info;
 
-	if (!dp_cfr_rcc_mode_status(pdev))
+	if (qdf_unlikely(!dp_cfr_rcc_mode_status(pdev)))
 		return;
 
 	cfr_info = &cdp_rx_ppdu->cfr_info;
@@ -436,7 +436,7 @@ dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
 		sw_peer_id = rx_user_status->sw_peer_id;
 		peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
 					     DP_MOD_ID_RX_PPDU_STATS);
-		if (!peer) {
+		if (qdf_unlikely(!peer)) {
 			rx_stats_peruser->peer_id = HTT_INVALID_PEER;
 			continue;
 		}
@@ -528,7 +528,7 @@ dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
 				 * max RU size will be equal to
 				 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
 				 */
-				if (ru_size >= OFDMA_NUM_RU_SIZE) {
+				if (qdf_unlikely(ru_size >= OFDMA_NUM_RU_SIZE)) {
 					dp_err("invalid ru_size %d\n",
 					       ru_size);
 					return;
@@ -594,7 +594,7 @@ dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
 	sw_peer_id = rx_user_status->sw_peer_id;
 	peer = dp_peer_get_ref_by_id(soc, sw_peer_id,
 				     DP_MOD_ID_RX_PPDU_STATS);
-	if (!peer) {
+	if (qdf_unlikely(!peer)) {
 		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
 		cdp_rx_ppdu->num_users = 0;
 		goto end;
@@ -653,10 +653,6 @@ dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
 
 	cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;
 
-	cdp_rx_ppdu->num_mpdu = 0;
-	cdp_rx_ppdu->num_msdu = 0;
-	cdp_rx_ppdu->retries = 0;
-
 	dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);
 
 	dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
@@ -869,12 +865,12 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
 	struct dp_mon_ops *mon_ops;
 	struct dp_mon_pdev *mon_pdev = NULL;
 
-	if (pdev)
+	if (qdf_likely(pdev))
 		soc = pdev->soc;
 	else
 		return;
 
-	if (!soc || soc->process_rx_status)
+	if (qdf_unlikely(!soc || soc->process_rx_status))
 		return;
 
 	mon_pdev = pdev->monitor_pdev;
@@ -888,12 +884,12 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
 		peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id,
 					     DP_MOD_ID_RX_PPDU_STATS);
 
-		if (!peer)
+		if (qdf_unlikely(!peer))
 			mon_peer = mon_pdev->invalid_mon_peer;
 		else
 			mon_peer = peer->monitor_peer;
 
-		if (!mon_peer) {
+		if (qdf_unlikely(!mon_peer)) {
 			if (peer)
 				dp_peer_unref_delete(peer,
 						     DP_MOD_ID_RX_PPDU_STATS);
@@ -917,7 +913,7 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
 		pkt_bw_offset = dp_get_bw_offset_frm_bw(soc, ppdu->u.bw);
 		DP_STATS_UPD(mon_peer, rx.snr, (ppdu->rssi + pkt_bw_offset));
 
-		if (mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR)
+		if (qdf_unlikely(mon_peer->stats.rx.avg_snr == CDP_INVALID_SNR))
 			mon_peer->stats.rx.avg_snr =
 				CDP_SNR_IN(mon_peer->stats.rx.snr);
 		else
@@ -943,7 +939,7 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
 			else
 				mu_pkt_type = TXRX_TYPE_MU_OFDMA;
 
-			if (nss) {
+			if (qdf_likely(nss)) {
 				DP_STATS_INC(mon_peer, rx.nss[nss - 1], num_msdu);
 				DP_STATS_INC(mon_peer,
 					rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1],
@@ -1033,7 +1029,7 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
 		 */
 		ac = TID_TO_WME_AC(ppdu_user->tid);
 
-		if (ppdu->tid != HAL_TID_INVALID)
+		if (qdf_likely(ppdu->tid != HAL_TID_INVALID))
 			DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu);
 
 		DP_STATS_INC(mon_peer, rx.rx_ppdus, 1);
@@ -1041,10 +1037,10 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
 			(ppdu_user->mpdu_cnt_fcs_ok + ppdu_user->mpdu_cnt_fcs_err));
 
 		mon_ops = dp_mon_ops_get(soc);
-		if (mon_ops && mon_ops->mon_rx_stats_update)
+		if (qdf_likely(mon_ops && mon_ops->mon_rx_stats_update))
 			mon_ops->mon_rx_stats_update(mon_peer, ppdu, ppdu_user);
 
-		if (!peer)
+		if (qdf_unlikely(!peer))
 			continue;
 
 		dp_peer_stats_notify(pdev, peer);
@@ -1077,47 +1073,50 @@ dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
 	 *
 	 * In CFR RCC mode - PPDU status TLVs of error pkts are also needed
 	 */
-	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
+	if (qdf_unlikely(ppdu_info->com_info.mpdu_cnt_fcs_ok == 0))
 		return;
 
-	if (ppdu_info->nac_info.fc_valid &&
-	    ppdu_info->nac_info.to_ds_flag &&
-	    ppdu_info->nac_info.mac_addr2_valid) {
-		struct dp_neighbour_peer *peer = NULL;
-		uint8_t rssi = ppdu_info->rx_status.rssi_comb;
-
-		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
-		if (mon_pdev->neighbour_peers_added) {
-			TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
-				      neighbour_peer_list_elem) {
-				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
-						 &ppdu_info->nac_info.mac_addr2,
-						 QDF_MAC_ADDR_SIZE)) {
-					peer->rssi = rssi;
-					break;
+	if (qdf_unlikely(mon_pdev->neighbour_peers_added)) {
+		if (ppdu_info->nac_info.fc_valid &&
+		    ppdu_info->nac_info.to_ds_flag &&
+		    ppdu_info->nac_info.mac_addr2_valid) {
+			struct dp_neighbour_peer *peer = NULL;
+			uint8_t rssi = ppdu_info->rx_status.rssi_comb;
+
+			qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
+			if (mon_pdev->neighbour_peers_added) {
+				TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
+					      neighbour_peer_list_elem) {
+					if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
+							 &ppdu_info->nac_info.mac_addr2,
+							 QDF_MAC_ADDR_SIZE)) {
+						peer->rssi = rssi;
+						break;
+					}
 				}
 			}
+			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
+		} else {
+			dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d",
+					ppdu_info->nac_info.fc_valid,
+					ppdu_info->nac_info.to_ds_flag,
+					ppdu_info->nac_info.mac_addr2_valid);
 		}
-		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
-	} else {
-		dp_info("Neighbour peers RSSI update failed! fc_valid = %d, to_ds_flag = %d and mac_addr2_valid = %d",
-			ppdu_info->nac_info.fc_valid,
-			ppdu_info->nac_info.to_ds_flag,
-			ppdu_info->nac_info.mac_addr2_valid);
 	}
 
 	/* need not generate wdi event when mcopy, cfr rcc mode and
 	 * enhanced stats are not enabled
 	 */
-	if (!mon_pdev->mcopy_mode && !mon_pdev->enhanced_stats_en &&
-	    !dp_cfr_rcc_mode_status(pdev))
+	if (qdf_unlikely(!mon_pdev->mcopy_mode &&
+			 !mon_pdev->enhanced_stats_en &&
+			 !dp_cfr_rcc_mode_status(pdev)))
 		return;
 
-	if (dp_cfr_rcc_mode_status(pdev))
+	if (qdf_unlikely(dp_cfr_rcc_mode_status(pdev)))
 		dp_update_cfr_dbg_stats(pdev, ppdu_info);
 
-	if (!ppdu_info->rx_status.frame_control_info_valid ||
-	    (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
+	if (qdf_unlikely(!ppdu_info->rx_status.frame_control_info_valid ||
+			 ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
 		if (!(mon_pdev->mcopy_mode ||
 		      (dp_bb_captured_chan_status(pdev, ppdu_info) ==
 		       QDF_STATUS_SUCCESS)))
@@ -1127,25 +1126,25 @@ dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
 	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
 				   sizeof(struct cdp_rx_indication_ppdu),
 				   0, 0, FALSE);
-	if (ppdu_nbuf) {
+	if (qdf_likely(ppdu_nbuf)) {
 		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)qdf_nbuf_data(ppdu_nbuf);
 
 		qdf_mem_zero(cdp_rx_ppdu, sizeof(struct cdp_rx_indication_ppdu));
 		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
 		dp_rx_populate_cdp_indication_ppdu(pdev,
 						   ppdu_info, cdp_rx_ppdu);
-		if (!qdf_nbuf_put_tail(ppdu_nbuf,
-				       sizeof(struct cdp_rx_indication_ppdu)))
+		if (qdf_unlikely(!qdf_nbuf_put_tail(ppdu_nbuf,
+				       sizeof(struct cdp_rx_indication_ppdu))))
 			return;
 
 		dp_rx_stats_update(pdev, cdp_rx_ppdu);
 
-		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
+		if (qdf_likely(cdp_rx_ppdu->peer_id != HTT_INVALID_PEER)) {
 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
 					     soc, ppdu_nbuf,
 					     cdp_rx_ppdu->peer_id,
 					     WDI_NO_VAL, pdev->pdev_id);
-		} else if (mon_pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev)) {
+		} else if (qdf_unlikely(mon_pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev))) {
 			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
 					     ppdu_nbuf, HTT_INVALID_PEER,
 					     WDI_NO_VAL, pdev->pdev_id);
@@ -1485,7 +1484,7 @@ dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
 {
 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
 
-	if (!mon_pdev->mcopy_mode)
+	if (qdf_unlikely(!mon_pdev->mcopy_mode))
 		return;
 
 	/* The fcs status is received in MPDU end tlv. If the RX header