diff --git a/dp/wifi3.0/dp_htt.c b/dp/wifi3.0/dp_htt.c
index 0bac5176f2..3349c2cbad 100644
--- a/dp/wifi3.0/dp_htt.c
+++ b/dp/wifi3.0/dp_htt.c
@@ -27,6 +27,9 @@
 #include "htt_stats.h"
 #include "htt_ppdu_stats.h"
 #include "dp_htt.h"
+#ifdef WIFI_MONITOR_SUPPORT
+#include
+#endif
 #include "qdf_mem.h" /* qdf_mem_malloc,free */
 #include "cdp_txrx_cmn_struct.h"
@@ -54,418 +57,6 @@
 ((pkt->htc_pkt.Status != QDF_STATUS_E_CANCELED) && \
 (pkt->htc_pkt.Status != QDF_STATUS_E_RESOURCES))
-/*
- * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
- * bitmap for sniffer mode
- * @bitmap: received bitmap
- *
- * Return: expected bitmap value, returns zero if doesn't match with
- * either 64-bit Tx window or 256-bit window tlv bitmap
- */
-int
-dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
-{
-	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
-		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
-	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
-		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
-
-	return 0;
-}
-
-#ifdef FEATURE_PERPKT_INFO
-/*
- * dp_peer_find_by_id_valid - check if peer exists for given id
- * @soc: core DP soc context
- * @peer_id: peer id from peer object can be retrieved
- *
- * Return: true if peer exists of false otherwise
- */
-
-static
-bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
-{
-	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
-						     DP_MOD_ID_HTT);
-
-	if (peer) {
-		/*
-		 * Decrement the peer ref which is taken as part of
-		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
-		 */
-		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
-
-		return true;
-	}
-
-	return false;
-}
-
-/*
- * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
- * @peer: Datapath peer handle
- * @ppdu: User PPDU Descriptor
- * @cur_ppdu_id: PPDU_ID
- *
- * Return: None
- *
- * on Tx data frame, we may get delayed ba set
- * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we
- * request Block Ack Request(BAR). Successful msdu is received only after Block
- * Ack. To populate peer stats we need successful msdu(data frame).
- * So we hold the Tx data stats on delayed_ba for stats update.
- */ -static void -dp_peer_copy_delay_stats(struct dp_peer *peer, - struct cdp_tx_completion_ppdu_user *ppdu, - uint32_t cur_ppdu_id) -{ - struct dp_pdev *pdev; - struct dp_vdev *vdev; - - if (peer->last_delayed_ba) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]", - peer->last_delayed_ba_ppduid, cur_ppdu_id); - vdev = peer->vdev; - if (vdev) { - pdev = vdev->pdev; - pdev->stats.cdp_delayed_ba_not_recev++; - } - } - - peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size; - peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc; - peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re; - peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf; - peer->delayed_ba_ppdu_stats.bw = ppdu->bw; - peer->delayed_ba_ppdu_stats.nss = ppdu->nss; - peer->delayed_ba_ppdu_stats.gi = ppdu->gi; - peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; - peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc; - peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; - peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast; - peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast; - peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl; - peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl; - peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; - - peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start; - peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones; - peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast; - - peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos; - peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id; - - peer->last_delayed_ba = true; - - ppdu->debug_copied = true; -} - -/* - * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats. - * @peer: Datapath peer handle - * @ppdu: PPDU Descriptor - * - * Return: None - * - * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info - * from Tx BAR frame not required to populate peer stats. - * But we need successful MPDU and MSDU to update previous - * transmitted Tx data frame. Overwrite ppdu stats with the previous - * stored ppdu stats. 
- */ -static void -dp_peer_copy_stats_to_bar(struct dp_peer *peer, - struct cdp_tx_completion_ppdu_user *ppdu) -{ - ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size; - ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc; - ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re; - ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf; - ppdu->bw = peer->delayed_ba_ppdu_stats.bw; - ppdu->nss = peer->delayed_ba_ppdu_stats.nss; - ppdu->gi = peer->delayed_ba_ppdu_stats.gi; - ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; - ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc; - ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; - ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast; - ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast; - ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl; - ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl; - ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; - - ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start; - ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones; - ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast; - - ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos; - ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id; - - peer->last_delayed_ba = false; - - ppdu->debug_copied = true; -} - -/* - * dp_tx_rate_stats_update() - Update rate per-peer statistics - * @peer: Datapath peer handle - * @ppdu: PPDU Descriptor - * - * Return: None - */ -static void -dp_tx_rate_stats_update(struct dp_peer *peer, - struct cdp_tx_completion_ppdu_user *ppdu) -{ - uint32_t ratekbps = 0; - uint64_t ppdu_tx_rate = 0; - uint32_t rix; - uint16_t ratecode = 0; - - if (!peer || !ppdu) - return; - - if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) - return; - - ratekbps = dp_getrateindex(ppdu->gi, - ppdu->mcs, - ppdu->nss, - ppdu->preamble, - ppdu->bw, - &rix, - &ratecode); - - DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps); - - if (!ratekbps) - return; - - /* Calculate goodput in non-training period - * In training period, don't do anything as - * pending pkt is send as goodput. - */ - if ((!peer->bss_peer) && (!ppdu->sa_is_training)) { - ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) * - (CDP_PERCENT_MACRO - ppdu->current_rate_per)); - } - ppdu->rix = rix; - ppdu->tx_ratekbps = ratekbps; - ppdu->tx_ratecode = ratecode; - peer->stats.tx.avg_tx_rate = - dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps); - ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate); - DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate); - - if (peer->vdev) { - /* - * In STA mode: - * We get ucast stats as BSS peer stats. - * - * In AP mode: - * We get mcast stats as BSS peer stats. - * We get ucast stats as assoc peer stats. 
- */ - if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) { - peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps; - peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs; - } else { - peer->vdev->stats.tx.last_tx_rate = ratekbps; - peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs; - } - } -} - -/* - * dp_tx_stats_update() - Update per-peer statistics - * @pdev: Datapath pdev handle - * @peer: Datapath peer handle - * @ppdu: PPDU Descriptor - * @ack_rssi: RSSI of last ack received - * - * Return: None - */ -static void -dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer, - struct cdp_tx_completion_ppdu_user *ppdu, - uint32_t ack_rssi) -{ - uint8_t preamble, mcs; - uint16_t num_msdu; - uint16_t num_mpdu; - uint16_t mpdu_tried; - uint16_t mpdu_failed; - - preamble = ppdu->preamble; - mcs = ppdu->mcs; - num_msdu = ppdu->num_msdu; - num_mpdu = ppdu->mpdu_success; - mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast; - mpdu_failed = mpdu_tried - num_mpdu; - - /* If the peer statistics are already processed as part of - * per-MSDU completion handler, do not process these again in per-PPDU - * indications */ - if (pdev->soc->process_tx_status) - return; - - if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) { - /* - * All failed mpdu will be retried, so incrementing - * retries mpdu based on mpdu failed. Even for - * ack failure i.e for long retries we get - * mpdu failed equal mpdu tried. - */ - DP_STATS_INC(peer, tx.retries, mpdu_failed); - DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); - return; - } - - if (ppdu->is_ppdu_cookie_valid) - DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1); - - if (ppdu->mu_group_id <= MAX_MU_GROUP_ID && - ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) { - if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1)))) - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - "mu_group_id out of bound!!\n"); - else - DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id], - (ppdu->user_pos + 1)); - } - - if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA || - ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) { - DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones); - DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start); - switch (ppdu->ru_tones) { - case RU_26: - DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu, - num_msdu); - DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu, - num_mpdu); - DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried, - mpdu_tried); - break; - case RU_52: - DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu, - num_msdu); - DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu, - num_mpdu); - DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried, - mpdu_tried); - break; - case RU_106: - DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu, - num_msdu); - DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu, - num_mpdu); - DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried, - mpdu_tried); - break; - case RU_242: - DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu, - num_msdu); - DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu, - num_mpdu); - DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried, - mpdu_tried); - break; - case RU_484: - DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu, - num_msdu); - DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu, - num_mpdu); - DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried, - mpdu_tried); - break; - case RU_996: - DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu, - num_msdu); - DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu, - 
num_mpdu); - DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried, - mpdu_tried); - break; - } - } - - /* - * All failed mpdu will be retried, so incrementing - * retries mpdu based on mpdu failed. Even for - * ack failure i.e for long retries we get - * mpdu failed equal mpdu tried. - */ - DP_STATS_INC(peer, tx.retries, mpdu_failed); - DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); - - DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu, - num_msdu); - DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu, - num_mpdu); - DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried, - mpdu_tried); - - DP_STATS_INC_PKT(peer, tx.comp_pkt, - num_msdu, (ppdu->success_bytes + - ppdu->retry_bytes + ppdu->failed_bytes)); - DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate); - DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu); - DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu); - DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu); - if (ppdu->tid < CDP_DATA_TID_MAX) - DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], - num_msdu); - DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc); - DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc); - if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid) - DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi); - - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, - ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[mcs], num_msdu, - ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, - ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[mcs], num_msdu, - ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, - ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[mcs], num_msdu, - ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, - ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[mcs], num_msdu, - ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, - ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX))); - DP_STATS_INCC(peer, - tx.pkt_type[preamble].mcs_count[mcs], num_msdu, - ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX))); - DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu); - DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu)); - DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct); - - dp_peer_stats_notify(pdev, peer); - -#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE - dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, - &peer->stats, ppdu->peer_id, - UPDATE_PEER_STATS, pdev->pdev_id); -#endif -} -#endif - QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc, uint32_t mac_id, uint32_t event, @@ -531,18 +122,6 @@ QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc, return QDF_STATUS_SUCCESS; } -#ifdef WLAN_TX_PKT_CAPTURE_ENH -#include "dp_tx_capture.h" -#else -static inline void -dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev, - void *data, - uint32_t ppdu_id, - uint32_t size) -{ -} -#endif - /* * htt_htc_pkt_alloc() - Allocate HTC packet buffer * @htt_soc: HTT SOC handle @@ -1190,6 +769,8 @@ fail0: return QDF_STATUS_E_FAILURE; } 
+qdf_export_symbol(htt_srng_setup); + #ifdef QCA_SUPPORT_FULL_MON /** * htt_h2t_full_mon_cfg() - Send full monitor configuarion msg to FW @@ -2346,1916 +1927,6 @@ void htt_t2h_stats_handler(void *context) dp_process_htt_stat_msg(&htt_stats, soc); } -/* - * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU, - * if a new peer id arrives in a PPDU - * pdev: DP pdev handle - * @peer_id : peer unique identifier - * @ppdu_info: per ppdu tlv structure - * - * return:user index to be populated - */ -#ifdef FEATURE_PERPKT_INFO -static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, - uint16_t peer_id, - struct ppdu_info *ppdu_info) -{ - uint8_t user_index = 0; - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - while ((user_index + 1) <= ppdu_info->last_user) { - ppdu_user_desc = &ppdu_desc->user[user_index]; - if (ppdu_user_desc->peer_id != peer_id) { - user_index++; - continue; - } else { - /* Max users possible is 8 so user array index should - * not exceed 7 - */ - qdf_assert_always(user_index <= (ppdu_desc->max_users - 1)); - return user_index; - } - } - - ppdu_info->last_user++; - /* Max users possible is 8 so last user should not exceed 8 */ - qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users); - return ppdu_info->last_user - 1; -} - -/* - * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv - * pdev: DP pdev handle - * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, - uint32_t *tag_buf, struct ppdu_info *ppdu_info) -{ - uint16_t frame_type; - uint16_t frame_ctrl; - uint16_t freq; - struct dp_soc *soc = NULL; - struct cdp_tx_completion_ppdu *ppdu_desc = NULL; - uint64_t ppdu_start_timestamp; - uint32_t *start_tag_buf; - - start_tag_buf = tag_buf; - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - ppdu_desc->ppdu_id = ppdu_info->ppdu_id; - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID); - ppdu_info->sched_cmdid = - HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf); - ppdu_desc->num_users = - HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); - - qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE); - frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); - ppdu_desc->htt_frame_type = frame_type; - - frame_ctrl = ppdu_desc->frame_ctrl; - - ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id; - - switch (frame_type) { - case HTT_STATS_FTYPE_TIDQ_DATA_SU: - case HTT_STATS_FTYPE_TIDQ_DATA_MU: - case HTT_STATS_FTYPE_SGEN_QOS_NULL: - /* - * for management packet, frame type come as DATA_SU - * need to check frame_ctrl before setting frame_type - */ - if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL) - ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; - else - ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; - break; - case HTT_STATS_FTYPE_SGEN_MU_BAR: - case HTT_STATS_FTYPE_SGEN_BAR: - ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR; - break; - default: - ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; - break; - } - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US); - ppdu_desc->tx_duration = *tag_buf; - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US); - ppdu_desc->ppdu_start_timestamp 
= *tag_buf; - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE); - freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); - if (freq != ppdu_desc->channel) { - soc = pdev->soc; - ppdu_desc->channel = freq; - pdev->operating_channel.freq = freq; - if (soc && soc->cdp_soc.ol_ops->freq_to_channel) - pdev->operating_channel.num = - soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc, - pdev->pdev_id, - freq); - - if (soc && soc->cdp_soc.ol_ops->freq_to_band) - pdev->operating_channel.band = - soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc, - pdev->pdev_id, - freq); - } - - ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM); - ppdu_desc->phy_ppdu_tx_time_us = - HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf); - ppdu_desc->beam_change = - HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf); - ppdu_desc->doppler = - HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf); - ppdu_desc->spatial_reuse = - HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf); - - dp_tx_capture_htt_frame_counter(pdev, frame_type); - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US); - ppdu_start_timestamp = *tag_buf; - ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp << - HTT_SHIFT_UPPER_TIMESTAMP) & - HTT_MASK_UPPER_TIMESTAMP); - - ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + - ppdu_desc->tx_duration; - /* Ack time stamp is same as end time stamp*/ - ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; - - ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + - ppdu_desc->tx_duration; - - ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp; - ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp; - ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration; - - /* Ack time stamp is same as end time stamp*/ - ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; - - tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR); - ppdu_desc->bss_color = - HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf); -} - -/* - * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common - * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_user_common_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - uint16_t peer_id; - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - uint8_t curr_user_index = 0; - struct dp_peer *peer; - struct dp_vdev *vdev; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = - (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = - dp_get_ppdu_info_user_index(pdev, - peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - - ppdu_desc->vdev_id = - HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf); - - ppdu_user_desc->peer_id = peer_id; - - tag_buf++; - - if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) { - ppdu_user_desc->delayed_ba = 1; - ppdu_desc->delayed_ba = 1; - } - - if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { - ppdu_user_desc->is_mcast = true; - ppdu_user_desc->mpdu_tried_mcast = - 
HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); - ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; - } else { - ppdu_user_desc->mpdu_tried_ucast = - HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); - } - - ppdu_user_desc->is_seq_num_valid = - HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf); - tag_buf++; - - ppdu_user_desc->qos_ctrl = - HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf); - ppdu_user_desc->frame_ctrl = - HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf); - ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl; - - if (ppdu_user_desc->delayed_ba) - ppdu_user_desc->mpdu_success = 0; - - tag_buf += 3; - - if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) { - ppdu_user_desc->ppdu_cookie = - HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf); - ppdu_user_desc->is_ppdu_cookie_valid = 1; - } - - /* returning earlier causes other feilds unpopulated */ - if (peer_id == DP_SCAN_PEER_ID) { - vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id, - DP_MOD_ID_TX_PPDU_STATS); - if (!vdev) - return; - qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw, - QDF_MAC_ADDR_SIZE); - dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS); - } else { - peer = dp_peer_get_ref_by_id(pdev->soc, peer_id, - DP_MOD_ID_TX_PPDU_STATS); - if (!peer) { - /* - * fw sends peer_id which is about to removed but - * it was already removed in host. - * eg: for disassoc, fw send ppdu stats - * with peer id equal to previously associated - * peer's peer_id but it was removed - */ - vdev = dp_vdev_get_ref_by_id(pdev->soc, - ppdu_desc->vdev_id, - DP_MOD_ID_TX_PPDU_STATS); - if (!vdev) - return; - qdf_mem_copy(ppdu_user_desc->mac_addr, - vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE); - dp_vdev_unref_delete(pdev->soc, vdev, - DP_MOD_ID_TX_PPDU_STATS); - return; - } - qdf_mem_copy(ppdu_user_desc->mac_addr, - peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); - dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); - } -} - - -/** - * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv - * @pdev: DP pdev handle - * @tag_buf: T2H message buffer carrying the user rate TLV - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev, - uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - uint16_t peer_id; - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - uint8_t curr_user_index = 0; - struct dp_vdev *vdev; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = - dp_get_ppdu_info_user_index(pdev, - peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - if (peer_id == DP_SCAN_PEER_ID) { - vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id, - DP_MOD_ID_TX_PPDU_STATS); - if (!vdev) - return; - dp_vdev_unref_delete(pdev->soc, vdev, - DP_MOD_ID_TX_PPDU_STATS); - } - ppdu_user_desc->peer_id = peer_id; - - ppdu_user_desc->tid = - HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf); - - tag_buf += 1; - - ppdu_user_desc->user_pos = - HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf); - ppdu_user_desc->mu_group_id = - HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf); - - tag_buf += 1; - - ppdu_user_desc->ru_start = - HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf); 
- ppdu_user_desc->ru_tones = - (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) - - HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1; - ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones; - - tag_buf += 2; - - ppdu_user_desc->ppdu_type = - HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf); - - tag_buf++; - ppdu_user_desc->tx_rate = *tag_buf; - - ppdu_user_desc->ltf_size = - HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf); - ppdu_user_desc->stbc = - HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf); - ppdu_user_desc->he_re = - HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf); - ppdu_user_desc->txbf = - HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf); - ppdu_user_desc->bw = - HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2; - ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf); - ppdu_desc->usr_nss_sum += ppdu_user_desc->nss; - ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf); - ppdu_user_desc->preamble = - HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf); - ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf); - ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); - ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); -} - -/* - * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process - * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv - * pdev: DP PDEV handle - * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = - (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; - - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - uint8_t curr_user_index = 0; - uint16_t peer_id; - uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - - peer_id = - HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - ppdu_user_desc->peer_id = peer_id; - - ppdu_user_desc->start_seq = dp_stats_buf->start_seq; - qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, - sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); - - dp_process_ppdu_stats_update_failed_bitmap(pdev, - (void *)ppdu_user_desc, - ppdu_info->ppdu_id, - size); -} - -/* - * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process - * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv - * soc: DP SOC handle - * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = - (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; - - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - uint8_t curr_user_index = 0; - uint16_t peer_id; - uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu 
*)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - - peer_id = - HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - ppdu_user_desc->peer_id = peer_id; - - ppdu_user_desc->start_seq = dp_stats_buf->start_seq; - qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, - sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); - - dp_process_ppdu_stats_update_failed_bitmap(pdev, - (void *)ppdu_user_desc, - ppdu_info->ppdu_id, - size); -} - -/* - * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process - * htt_ppdu_stats_user_cmpltn_common_tlv - * soc: DP SOC handle - * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_user_cmpltn_common_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - uint16_t peer_id; - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - uint8_t curr_user_index = 0; - uint8_t bw_iter; - htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = - (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - peer_id = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - ppdu_user_desc->peer_id = peer_id; - - ppdu_user_desc->completion_status = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( - *tag_buf); - - ppdu_user_desc->tid = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); - - - tag_buf++; - if (qdf_likely(ppdu_user_desc->completion_status == - HTT_PPDU_STATS_USER_STATUS_OK)) { - ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; - ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi; - ppdu_user_desc->ack_rssi_valid = 1; - } else { - ppdu_user_desc->ack_rssi_valid = 0; - } - - tag_buf++; - - ppdu_user_desc->mpdu_success = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); - - ppdu_user_desc->mpdu_failed = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) - - ppdu_user_desc->mpdu_success; - - tag_buf++; - - ppdu_user_desc->long_retries = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); - - ppdu_user_desc->short_retries = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); - ppdu_user_desc->retry_msdus = - ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; - - ppdu_user_desc->is_ampdu = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); - ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; - - ppdu_desc->resp_type = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf); - ppdu_desc->mprot_type = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf); - ppdu_desc->rts_success = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf); - ppdu_desc->rts_failure = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf); - ppdu_user_desc->pream_punct = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf); - - ppdu_info->compltn_common_tlv++; - - /* - * MU BAR may send request to n users but we may 
received ack only from - * m users. To have count of number of users respond back, we have a - * separate counter bar_num_users per PPDU that get increment for every - * htt_ppdu_stats_user_cmpltn_common_tlv - */ - ppdu_desc->bar_num_users++; - - tag_buf++; - for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) { - ppdu_user_desc->rssi_chain[bw_iter] = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf); - tag_buf++; - } - - ppdu_user_desc->sa_tx_antenna = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf); - - tag_buf++; - ppdu_user_desc->sa_is_training = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf); - if (ppdu_user_desc->sa_is_training) { - ppdu_user_desc->sa_goodput = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf); - } - - tag_buf++; - for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) { - ppdu_user_desc->sa_max_rates[bw_iter] = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]); - } - - tag_buf += CDP_NUM_SA_BW; - ppdu_user_desc->current_rate_per = - HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf); -} - -/* - * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process - * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv - * pdev: DP PDEV handle - * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = - (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - struct cdp_tx_completion_ppdu *ppdu_desc; - uint8_t curr_user_index = 0; - uint16_t peer_id; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - - peer_id = - HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - ppdu_user_desc->peer_id = peer_id; - - ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; - qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, - sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); - ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32; -} - -/* - * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process - * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv - * pdev: DP PDEV handle - * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = - (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - struct cdp_tx_completion_ppdu *ppdu_desc; - uint8_t curr_user_index = 0; - uint16_t peer_id; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - - peer_id = - HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = dp_get_ppdu_info_user_index(pdev, 
peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - ppdu_user_desc->peer_id = peer_id; - - ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; - qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, - sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); - ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32; -} - -/* - * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process - * htt_ppdu_stats_user_compltn_ack_ba_status_tlv - * pdev: DP PDE handle - * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - uint16_t peer_id; - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - uint8_t curr_user_index = 0; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf += 2; - peer_id = - HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf); - - curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); - ppdu_user_desc = &ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - if (!ppdu_user_desc->ack_ba_tlv) { - ppdu_user_desc->ack_ba_tlv = 1; - } else { - pdev->stats.ack_ba_comes_twice++; - return; - } - - ppdu_user_desc->peer_id = peer_id; - - tag_buf++; - /* not to update ppdu_desc->tid from this TLV */ - ppdu_user_desc->num_mpdu = - HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf); - - ppdu_user_desc->num_msdu = - HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf); - - ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu; - - tag_buf++; - ppdu_user_desc->start_seq = - HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET( - *tag_buf); - - tag_buf++; - ppdu_user_desc->success_bytes = *tag_buf; - - /* increase ack ba tlv counter on successful mpdu */ - if (ppdu_user_desc->num_mpdu) - ppdu_info->ack_ba_tlv++; - - if (ppdu_user_desc->ba_size == 0) { - ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq; - ppdu_user_desc->ba_bitmap[0] = 1; - ppdu_user_desc->ba_size = 1; - } -} - -/* - * dp_process_ppdu_stats_user_common_array_tlv: Process - * htt_ppdu_stats_user_common_array_tlv - * pdev: DP PDEV handle - * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void dp_process_ppdu_stats_user_common_array_tlv( - struct dp_pdev *pdev, uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - uint32_t peer_id; - struct cdp_tx_completion_ppdu *ppdu_desc; - struct cdp_tx_completion_ppdu_user *ppdu_user_desc; - uint8_t curr_user_index = 0; - struct htt_tx_ppdu_stats_info *dp_stats_buf; - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - - ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - - tag_buf++; - dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf; - tag_buf += 3; - peer_id = - HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf); - - if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - "Invalid peer"); - return; - } - - curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); - - ppdu_user_desc = 
&ppdu_desc->user[curr_user_index]; - ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); - - ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes; - ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes; - - tag_buf++; - - ppdu_user_desc->success_msdus = - HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf); - ppdu_user_desc->retry_bytes = - HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf); - tag_buf++; - ppdu_user_desc->failed_msdus = - HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf); -} - -/* - * dp_process_ppdu_stats_flush_tlv: Process - * htt_ppdu_stats_flush_tlv - * @pdev: DP PDEV handle - * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void -dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev, - uint32_t *tag_buf, - struct ppdu_info *ppdu_info) -{ - struct cdp_tx_completion_ppdu *ppdu_desc; - uint32_t peer_id; - uint8_t tid; - struct dp_peer *peer; - - ppdu_desc = (struct cdp_tx_completion_ppdu *) - qdf_nbuf_data(ppdu_info->nbuf); - ppdu_desc->is_flush = 1; - - tag_buf++; - ppdu_desc->drop_reason = *tag_buf; - - tag_buf++; - ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf); - ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf); - ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf); - - tag_buf++; - peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf); - tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf); - - ppdu_desc->num_users = 1; - ppdu_desc->user[0].peer_id = peer_id; - ppdu_desc->user[0].tid = tid; - - ppdu_desc->queue_type = - HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf); - - peer = dp_peer_get_ref_by_id(pdev->soc, peer_id, - DP_MOD_ID_TX_PPDU_STATS); - if (!peer) - goto add_ppdu_to_sched_list; - - if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) { - DP_STATS_INC(peer, - tx.excess_retries_per_ac[TID_TO_WME_AC(tid)], - ppdu_desc->num_msdu); - } - - dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); - -add_ppdu_to_sched_list: - ppdu_info->done = 1; - TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); - pdev->list_depth--; - TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info, - ppdu_info_list_elem); - pdev->sched_comp_list_depth++; -} - -/** - * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv - * Here we are not going to process the buffer. 
- * @pdev: DP PDEV handle - * @ppdu_info: per ppdu tlv structure - * - * return:void - */ -static void -dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev, - struct ppdu_info *ppdu_info) -{ - struct cdp_tx_completion_ppdu *ppdu_desc; - struct dp_peer *peer; - uint8_t num_users; - uint8_t i; - - ppdu_desc = (struct cdp_tx_completion_ppdu *) - qdf_nbuf_data(ppdu_info->nbuf); - - num_users = ppdu_desc->bar_num_users; - - for (i = 0; i < num_users; i++) { - if (ppdu_desc->user[i].user_pos == 0) { - if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { - /* update phy mode for bar frame */ - ppdu_desc->phy_mode = - ppdu_desc->user[i].preamble; - ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs; - break; - } - if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) { - ppdu_desc->frame_ctrl = - ppdu_desc->user[i].frame_ctrl; - break; - } - } - } - - if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && - ppdu_desc->delayed_ba) { - qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); - - for (i = 0; i < ppdu_desc->num_users; i++) { - struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; - uint64_t start_tsf; - uint64_t end_tsf; - uint32_t ppdu_id; - - ppdu_id = ppdu_desc->ppdu_id; - peer = dp_peer_get_ref_by_id - (pdev->soc, ppdu_desc->user[i].peer_id, - DP_MOD_ID_TX_PPDU_STATS); - /** - * This check is to make sure peer is not deleted - * after processing the TLVs. - */ - if (!peer) - continue; - - delay_ppdu = &peer->delayed_ba_ppdu_stats; - start_tsf = ppdu_desc->ppdu_start_timestamp; - end_tsf = ppdu_desc->ppdu_end_timestamp; - /** - * save delayed ba user info - */ - if (ppdu_desc->user[i].delayed_ba) { - dp_peer_copy_delay_stats(peer, - &ppdu_desc->user[i], - ppdu_id); - peer->last_delayed_ba_ppduid = ppdu_id; - delay_ppdu->ppdu_start_timestamp = start_tsf; - delay_ppdu->ppdu_end_timestamp = end_tsf; - } - ppdu_desc->user[i].peer_last_delayed_ba = - peer->last_delayed_ba; - - dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); - - if (ppdu_desc->user[i].delayed_ba && - !ppdu_desc->user[i].debug_copied) { - QDF_TRACE(QDF_MODULE_ID_TXRX, - QDF_TRACE_LEVEL_INFO_MED, - "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n", - __func__, __LINE__, - ppdu_desc->ppdu_id, - ppdu_desc->bar_ppdu_id, - ppdu_desc->num_users, - i, - ppdu_desc->htt_frame_type); - } - } - } - - /* - * when frame type is BAR and STATS_COMMON_TLV is set - * copy the store peer delayed info to BAR status - */ - if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { - for (i = 0; i < ppdu_desc->bar_num_users; i++) { - struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; - uint64_t start_tsf; - uint64_t end_tsf; - - peer = dp_peer_get_ref_by_id - (pdev->soc, - ppdu_desc->user[i].peer_id, - DP_MOD_ID_TX_PPDU_STATS); - /** - * This check is to make sure peer is not deleted - * after processing the TLVs. 
- */ - if (!peer) - continue; - - if (ppdu_desc->user[i].completion_status != - HTT_PPDU_STATS_USER_STATUS_OK) { - dp_peer_unref_delete(peer, - DP_MOD_ID_TX_PPDU_STATS); - continue; - } - - delay_ppdu = &peer->delayed_ba_ppdu_stats; - start_tsf = delay_ppdu->ppdu_start_timestamp; - end_tsf = delay_ppdu->ppdu_end_timestamp; - - if (peer->last_delayed_ba) { - dp_peer_copy_stats_to_bar(peer, - &ppdu_desc->user[i]); - ppdu_desc->ppdu_id = - peer->last_delayed_ba_ppduid; - ppdu_desc->ppdu_start_timestamp = start_tsf; - ppdu_desc->ppdu_end_timestamp = end_tsf; - } - ppdu_desc->user[i].peer_last_delayed_ba = - peer->last_delayed_ba; - dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); - } - } - - TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); - pdev->list_depth--; - TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info, - ppdu_info_list_elem); - pdev->sched_comp_list_depth++; -} - -#ifndef WLAN_TX_PKT_CAPTURE_ENH -/* - * dp_deliver_mgmt_frm: Process - * @pdev: DP PDEV handle - * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv - * - * return: void - */ -void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) -{ - if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { - dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc, - nbuf, HTT_INVALID_PEER, - WDI_NO_VAL, pdev->pdev_id); - } else { - if (!pdev->bpr_enable) - qdf_nbuf_free(nbuf); - } -} -#endif - -/* - * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process - * htt_ppdu_stats_tx_mgmtctrl_payload_tlv - * @pdev: DP PDEV handle - * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv - * @length: tlv_length - * - * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller - */ -static QDF_STATUS -dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev, - qdf_nbuf_t tag_buf, - uint32_t ppdu_id) -{ - uint32_t *nbuf_ptr; - uint8_t trim_size; - size_t head_size; - struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info; - uint32_t *msg_word; - uint32_t tsf_hdr; - - if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) && - (!pdev->bpr_enable) && (!pdev->tx_capture_enabled)) - return QDF_STATUS_SUCCESS; - - /* - * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t - */ - msg_word = (uint32_t *)qdf_nbuf_data(tag_buf); - msg_word = msg_word + 2; - tsf_hdr = *msg_word; - - trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf + - HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) - - qdf_nbuf_data(tag_buf)); - - if (!qdf_nbuf_pull_head(tag_buf, trim_size)) - return QDF_STATUS_SUCCESS; - - qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) - - pdev->mgmtctrl_frm_info.mgmt_buf_len); - - if (pdev->tx_capture_enabled) { - head_size = sizeof(struct cdp_tx_mgmt_comp_info); - if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) { - qdf_err("Fail to get headroom h_sz %zu h_avail %d\n", - head_size, qdf_nbuf_headroom(tag_buf)); - qdf_assert_always(0); - return QDF_STATUS_E_NOMEM; - } - ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *) - qdf_nbuf_push_head(tag_buf, head_size); - qdf_assert_always(ptr_mgmt_comp_info); - ptr_mgmt_comp_info->ppdu_id = ppdu_id; - ptr_mgmt_comp_info->is_sgen_pkt = true; - ptr_mgmt_comp_info->tx_tsf = tsf_hdr; - } else { - head_size = sizeof(ppdu_id); - nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size); - *nbuf_ptr = ppdu_id; - } - - if (pdev->bpr_enable) { - dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc, - tag_buf, HTT_INVALID_PEER, - WDI_NO_VAL, pdev->pdev_id); - } - - dp_deliver_mgmt_frm(pdev, tag_buf); - - return QDF_STATUS_E_ALREADY; -} - -/** - * 
dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU - * - * If the TLV length sent as part of PPDU TLV is less that expected size i.e - * size of corresponding data structure, pad the remaining bytes with zeros - * and continue processing the TLVs - * - * @pdev: DP pdev handle - * @tag_buf: TLV buffer - * @tlv_expected_size: Expected size of Tag - * @tlv_len: TLV length received from FW - * - * Return: Pointer to updated TLV - */ -static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev, - uint32_t *tag_buf, - uint16_t tlv_expected_size, - uint16_t tlv_len) -{ - uint32_t *tlv_desc = tag_buf; - - qdf_assert_always(tlv_len != 0); - - if (tlv_len < tlv_expected_size) { - qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size); - qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len); - tlv_desc = pdev->ppdu_tlv_buf; - } - - return tlv_desc; -} - -/** - * dp_process_ppdu_tag(): Function to process the PPDU TLVs - * @pdev: DP pdev handle - * @tag_buf: TLV buffer - * @tlv_len: length of tlv - * @ppdu_info: per ppdu tlv structure - * - * return: void - */ -static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf, - uint32_t tlv_len, struct ppdu_info *ppdu_info) -{ - uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); - uint16_t tlv_expected_size; - uint32_t *tlv_desc; - - switch (tlv_type) { - case HTT_PPDU_STATS_COMMON_TLV: - tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_COMMON_TLV: - tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc, - ppdu_info); - break; - case HTT_PPDU_STATS_USR_RATE_TLV: - tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc, - ppdu_info); - break; - case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV: - tlv_expected_size = - sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( - pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV: - tlv_expected_size = - sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( - pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV: - tlv_expected_size = - sizeof(htt_ppdu_stats_user_cmpltn_common_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_user_cmpltn_common_tlv( - pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV: - tlv_expected_size = - sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( - pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV: - tlv_expected_size = - sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - 
dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( - pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV: - tlv_expected_size = - sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( - pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV: - tlv_expected_size = - sizeof(htt_ppdu_stats_usr_common_array_tlv_v); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_user_common_array_tlv( - pdev, tlv_desc, ppdu_info); - break; - case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV: - tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv); - tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, - tlv_expected_size, tlv_len); - dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc, - ppdu_info); - break; - case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV: - dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info); - break; - default: - break; - } -} - -#ifdef WLAN_ATF_ENABLE -static void -dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, - struct cdp_tx_completion_ppdu *ppdu_desc, - struct cdp_tx_completion_ppdu_user *user) -{ - uint32_t nss_ru_width_sum = 0; - - if (!pdev || !ppdu_desc || !user) - return; - - if (!pdev->dp_atf_stats_enable) - return; - - if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA) - return; - - nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum; - if (!nss_ru_width_sum) - nss_ru_width_sum = 1; - - /** - * For SU-MIMO PPDU phy Tx time is same for the single user. - * For MU-MIMO phy Tx time is calculated per user as below - * user phy tx time = - * Entire PPDU duration * MU Ratio * OFDMA Ratio - * MU Ratio = usr_nss / Sum_of_nss_of_all_users - * OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users - * usr_ru_widt = ru_end – ru_start + 1 - */ - if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) { - user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us; - } else { - user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us * - user->nss * user->ru_tones) / nss_ru_width_sum; - } -} -#else -static void -dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, - struct cdp_tx_completion_ppdu *ppdu_desc, - struct cdp_tx_completion_ppdu_user *user) -{ -} -#endif - -/** - * dp_ppdu_desc_user_stats_update(): Function to update TX user stats - * @pdev: DP pdev handle - * @ppdu_info: per PPDU TLV descriptor - * - * return: void - */ -void -dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev, - struct ppdu_info *ppdu_info) -{ - struct cdp_tx_completion_ppdu *ppdu_desc = NULL; - struct dp_peer *peer = NULL; - uint32_t tlv_bitmap_expected; - uint32_t tlv_bitmap_default; - uint16_t i; - uint32_t num_users; - - ppdu_desc = (struct cdp_tx_completion_ppdu *) - qdf_nbuf_data(ppdu_info->nbuf); - - if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR) - ppdu_desc->ppdu_id = ppdu_info->ppdu_id; - - tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; - if (pdev->tx_sniffer_enable || pdev->mcopy_mode || - pdev->tx_capture_enabled) { - if (ppdu_info->is_ampdu) - tlv_bitmap_expected = - dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( - ppdu_info->tlv_bitmap); - } - - tlv_bitmap_default = tlv_bitmap_expected; - - if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { - num_users = ppdu_desc->bar_num_users; - ppdu_desc->num_users = ppdu_desc->bar_num_users; - } else { - num_users = ppdu_desc->num_users; - } - 
qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); - - for (i = 0; i < num_users; i++) { - ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu; - ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu; - - peer = dp_peer_get_ref_by_id(pdev->soc, - ppdu_desc->user[i].peer_id, - DP_MOD_ID_TX_PPDU_STATS); - /** - * This check is to make sure peer is not deleted - * after processing the TLVs. - */ - if (!peer) - continue; - - ppdu_desc->user[i].is_bss_peer = peer->bss_peer; - /* - * different frame like DATA, BAR or CTRL has different - * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we - * receive other tlv in-order/sequential from fw. - * Since ACK_BA_STATUS TLV come from Hardware it is - * asynchronous So we need to depend on some tlv to confirm - * all tlv is received for a ppdu. - * So we depend on both SCHED_CMD_STATUS_TLV and - * ACK_BA_STATUS_TLV. for failure packet we won't get - * ACK_BA_STATUS_TLV. - */ - if (!(ppdu_info->tlv_bitmap & - (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) || - (!(ppdu_info->tlv_bitmap & - (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) && - (ppdu_desc->user[i].completion_status == - HTT_PPDU_STATS_USER_STATUS_OK))) { - dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); - continue; - } - - /** - * Update tx stats for data frames having Qos as well as - * non-Qos data tid - */ - - if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX || - (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) || - (ppdu_desc->htt_frame_type == - HTT_STATS_FTYPE_SGEN_QOS_NULL) || - ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) && - (ppdu_desc->num_mpdu > 1))) && - (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) { - - dp_tx_stats_update(pdev, peer, - &ppdu_desc->user[i], - ppdu_desc->ack_rssi); - dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]); - } - - dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc, - &ppdu_desc->user[i]); - - dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); - tlv_bitmap_expected = tlv_bitmap_default; - } -} - -#ifndef WLAN_TX_PKT_CAPTURE_ENH - -/** - * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor - * to upper layer - * @pdev: DP pdev handle - * @ppdu_info: per PPDU TLV descriptor - * - * return: void - */ -static -void dp_ppdu_desc_deliver(struct dp_pdev *pdev, - struct ppdu_info *ppdu_info) -{ - struct ppdu_info *s_ppdu_info = NULL; - struct ppdu_info *ppdu_info_next = NULL; - struct cdp_tx_completion_ppdu *ppdu_desc = NULL; - qdf_nbuf_t nbuf; - uint32_t time_delta = 0; - bool starved = 0; - bool matched = 0; - bool recv_ack_ba_done = 0; - - if (ppdu_info->tlv_bitmap & - (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && - ppdu_info->done) - recv_ack_ba_done = 1; - - pdev->last_sched_cmdid = ppdu_info->sched_cmdid; - - s_ppdu_info = TAILQ_FIRST(&pdev->sched_comp_ppdu_list); - - TAILQ_FOREACH_SAFE(s_ppdu_info, &pdev->sched_comp_ppdu_list, - ppdu_info_list_elem, ppdu_info_next) { - if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32) - time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) + - ppdu_info->tsf_l32; - else - time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32; - - if (!s_ppdu_info->done && !recv_ack_ba_done) { - if (time_delta < MAX_SCHED_STARVE) { - dp_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]", - pdev->pdev_id, - s_ppdu_info->ppdu_id, - s_ppdu_info->sched_cmdid, - s_ppdu_info->tlv_bitmap, - s_ppdu_info->tsf_l32, - s_ppdu_info->done); - break; - } - starved = 1; - } - - pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid; - TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, s_ppdu_info, 
- ppdu_info_list_elem); - pdev->sched_comp_list_depth--; - - nbuf = s_ppdu_info->nbuf; - qdf_assert_always(nbuf); - ppdu_desc = (struct cdp_tx_completion_ppdu *) - qdf_nbuf_data(nbuf); - ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap; - - if (starved) { - dp_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n", - ppdu_desc->frame_ctrl, - ppdu_desc->htt_frame_type, - ppdu_desc->tlv_bitmap, - ppdu_desc->user[0].completion_status); - starved = 0; - } - - if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id && - ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid) - matched = 1; - - dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info); - - qdf_mem_free(s_ppdu_info); - - /** - * Deliver PPDU stats only for valid (acked) data - * frames if sniffer mode is not enabled. - * If sniffer mode is enabled, PPDU stats - * for all frames including mgmt/control - * frames should be delivered to upper layer - */ - if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { - dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, - pdev->soc, - nbuf, HTT_INVALID_PEER, - WDI_NO_VAL, - pdev->pdev_id); - } else { - if (ppdu_desc->num_mpdu != 0 && - ppdu_desc->num_users != 0 && - ppdu_desc->frame_ctrl & - HTT_FRAMECTRL_DATATYPE) { - dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, - pdev->soc, - nbuf, HTT_INVALID_PEER, - WDI_NO_VAL, - pdev->pdev_id); - } else { - qdf_nbuf_free(nbuf); - } - } - - if (matched) - break; - } - return; -} - -#endif - -/** - * dp_get_ppdu_desc(): Function to allocate new PPDU status - * desc for new ppdu id - * @pdev: DP pdev handle - * @ppdu_id: PPDU unique identifier - * @tlv_type: TLV type received - * @tsf_l32: timestamp received along with ppdu stats indication header - * @max_users: Maximum user for that particular ppdu - * - * return: ppdu_info per ppdu tlv structure - */ -static -struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, - uint8_t tlv_type, uint32_t tsf_l32, - uint8_t max_users) -{ - struct ppdu_info *ppdu_info = NULL; - struct ppdu_info *s_ppdu_info = NULL; - struct ppdu_info *ppdu_info_next = NULL; - struct cdp_tx_completion_ppdu *ppdu_desc = NULL; - uint32_t size = 0; - struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL; - struct cdp_tx_completion_ppdu_user *tmp_user; - uint32_t time_delta; - - /* - * Find ppdu_id node exists or not - */ - TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list, - ppdu_info_list_elem, ppdu_info_next) { - if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { - if (ppdu_info->tsf_l32 > tsf_l32) - time_delta = (MAX_TSF_32 - - ppdu_info->tsf_l32) + tsf_l32; - else - time_delta = tsf_l32 - ppdu_info->tsf_l32; - - if (time_delta > WRAP_DROP_TSF_DELTA) { - TAILQ_REMOVE(&pdev->ppdu_info_list, - ppdu_info, ppdu_info_list_elem); - pdev->list_depth--; - pdev->stats.ppdu_wrap_drop++; - tmp_ppdu_desc = - (struct cdp_tx_completion_ppdu *) - qdf_nbuf_data(ppdu_info->nbuf); - tmp_user = &tmp_ppdu_desc->user[0]; - dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n", - ppdu_info->ppdu_id, - ppdu_info->tsf_l32, - ppdu_info->tlv_bitmap, - tmp_user->completion_status, - ppdu_info->compltn_common_tlv, - ppdu_info->ack_ba_tlv, - ppdu_id, tsf_l32, tlv_type); - qdf_nbuf_free(ppdu_info->nbuf); - ppdu_info->nbuf = NULL; - qdf_mem_free(ppdu_info); - } else { - break; - } - } - } - - /* - * check if it is ack ba tlv and if it is not there in ppdu info - * list then check it in sched completion ppdu list - */ - if (!ppdu_info && - tlv_type == 
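/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * delivery decision made in the loop above: with sniffer or m-copy mode
 * enabled every completed PPDU descriptor is handed to the upper layer,
 * otherwise only acked data PPDUs are delivered and the rest are freed.
 * FRAMECTRL_DATATYPE_SKETCH is a hypothetical stand-in for
 * HTT_FRAMECTRL_DATATYPE.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FRAMECTRL_DATATYPE_SKETCH 0x0008	/* assumed data-type bit */

static bool should_deliver_ppdu(bool sniffer_or_mcopy, uint32_t num_mpdu,
				uint32_t num_users, uint16_t frame_ctrl)
{
	if (sniffer_or_mcopy)
		return true;	/* mgmt/ctrl frames also go up in sniffer mode */
	return num_mpdu != 0 && num_users != 0 &&
	       (frame_ctrl & FRAMECTRL_DATATYPE_SKETCH) != 0;
}

int main(void)
{
	/* Acked data PPDU without sniffer mode: delivered. */
	printf("deliver=%d\n", should_deliver_ppdu(false, 3, 1, 0x0008));
	return 0;
}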
HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) { - TAILQ_FOREACH(s_ppdu_info, - &pdev->sched_comp_ppdu_list, - ppdu_info_list_elem) { - if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) { - if (s_ppdu_info->tsf_l32 > tsf_l32) - time_delta = (MAX_TSF_32 - - s_ppdu_info->tsf_l32) + - tsf_l32; - else - time_delta = tsf_l32 - - s_ppdu_info->tsf_l32; - if (time_delta < WRAP_DROP_TSF_DELTA) { - ppdu_info = s_ppdu_info; - break; - } - } else { - /* - * ACK BA STATUS TLV comes sequential order - * if we received ack ba status tlv for second - * ppdu and first ppdu is still waiting for - * ACK BA STATUS TLV. Based on fw comment - * we won't receive it tlv later. So we can - * set ppdu info done. - */ - if (s_ppdu_info) - s_ppdu_info->done = 1; - } - } - } - - if (ppdu_info) { - if (ppdu_info->tlv_bitmap & (1 << tlv_type)) { - /** - * if we get tlv_type that is already been processed - * for ppdu, that means we got a new ppdu with same - * ppdu id. Hence Flush the older ppdu - * for MUMIMO and OFDMA, In a PPDU we have - * multiple user with same tlv types. tlv bitmap is - * used to check whether SU or MU_MIMO/OFDMA - */ - if (!(ppdu_info->tlv_bitmap & - (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) - return ppdu_info; - - ppdu_desc = (struct cdp_tx_completion_ppdu *) - qdf_nbuf_data(ppdu_info->nbuf); - - /** - * apart from ACK BA STATUS TLV rest all comes in order - * so if tlv type not ACK BA STATUS TLV we can deliver - * ppdu_info - */ - if ((tlv_type == - HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && - (ppdu_desc->htt_frame_type == - HTT_STATS_FTYPE_SGEN_MU_BAR)) - return ppdu_info; - - dp_ppdu_desc_deliver(pdev, ppdu_info); - } else { - return ppdu_info; - } - } - - /** - * Flush the head ppdu descriptor if ppdu desc list reaches max - * threshold - */ - if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { - ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list); - TAILQ_REMOVE(&pdev->ppdu_info_list, - ppdu_info, ppdu_info_list_elem); - pdev->list_depth--; - pdev->stats.ppdu_drop++; - qdf_nbuf_free(ppdu_info->nbuf); - ppdu_info->nbuf = NULL; - qdf_mem_free(ppdu_info); - } - - size = sizeof(struct cdp_tx_completion_ppdu) + - (max_users * sizeof(struct cdp_tx_completion_ppdu_user)); - - /* - * Allocate new ppdu_info node - */ - ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); - if (!ppdu_info) - return NULL; - - ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size, - 0, 4, TRUE); - if (!ppdu_info->nbuf) { - qdf_mem_free(ppdu_info); - return NULL; - } - - ppdu_info->ppdu_desc = - (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); - qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size); - - if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - "No tailroom for HTT PPDU"); - qdf_nbuf_free(ppdu_info->nbuf); - ppdu_info->nbuf = NULL; - ppdu_info->last_user = 0; - qdf_mem_free(ppdu_info); - return NULL; - } - - ppdu_info->ppdu_desc->max_users = max_users; - ppdu_info->tsf_l32 = tsf_l32; - /** - * No lock is needed because all PPDU TLVs are processed in - * same context and this list is updated in same context - */ - TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info, - ppdu_info_list_elem); - pdev->list_depth++; - return ppdu_info; -} - -/** - * dp_htt_process_tlv(): Function to process each PPDU TLVs - * @pdev: DP pdev handle - * @htt_t2h_msg: HTT target to host message - * - * return: ppdu_info per ppdu tlv structure - */ - -static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, - qdf_nbuf_t htt_t2h_msg) -{ - uint32_t 
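/*
 * Illustrative, self-contained sketch (not part of the applied patch) of two
 * ideas used by dp_get_ppdu_desc() above: the per-PPDU descriptor is sized
 * for a variable number of users, and the pending list is bounded -- when it
 * reaches a maximum depth the oldest entry is flushed before a new one is
 * queued. Structure and macro names here are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#define PPDU_LIST_MAX_DEPTH_SKETCH 4	/* assumed flush threshold */

struct user_sketch { uint32_t num_mpdu, num_msdu; };

struct ppdu_desc_sketch {
	uint32_t ppdu_id;
	uint8_t max_users;
	struct user_sketch user[];	/* flexible array, one slot per user */
};

struct ppdu_node_sketch {
	TAILQ_ENTRY(ppdu_node_sketch) elem;
	struct ppdu_desc_sketch *desc;
};

TAILQ_HEAD(ppdu_list_sketch, ppdu_node_sketch);

static struct ppdu_node_sketch *
ppdu_node_alloc(uint32_t ppdu_id, uint8_t max_users)
{
	struct ppdu_node_sketch *node = calloc(1, sizeof(*node));
	size_t size = sizeof(struct ppdu_desc_sketch) +
		      (size_t)max_users * sizeof(struct user_sketch);

	if (!node)
		return NULL;
	node->desc = calloc(1, size);
	if (!node->desc) {
		free(node);
		return NULL;
	}
	node->desc->ppdu_id = ppdu_id;
	node->desc->max_users = max_users;
	return node;
}

/* Queue a new node, evicting the oldest one first if the list is full. */
static void ppdu_list_add(struct ppdu_list_sketch *list, int *depth,
			  struct ppdu_node_sketch *node)
{
	if (!node)
		return;
	if (*depth >= PPDU_LIST_MAX_DEPTH_SKETCH) {
		struct ppdu_node_sketch *old = TAILQ_FIRST(list);

		TAILQ_REMOVE(list, old, elem);
		(*depth)--;
		free(old->desc);
		free(old);
	}
	TAILQ_INSERT_TAIL(list, node, elem);
	(*depth)++;
}

int main(void)
{
	struct ppdu_list_sketch list = TAILQ_HEAD_INITIALIZER(list);
	int depth = 0;
	uint32_t id;

	for (id = 1; id <= 6; id++)
		ppdu_list_add(&list, &depth, ppdu_node_alloc(id, 8));
	/* Depth stays capped; remaining nodes are not freed in this demo. */
	printf("depth after 6 inserts: %d\n", depth);
	return 0;
}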
length; - uint32_t ppdu_id; - uint8_t tlv_type; - uint32_t tlv_length, tlv_bitmap_expected; - uint8_t *tlv_buf; - struct ppdu_info *ppdu_info = NULL; - struct cdp_tx_completion_ppdu *ppdu_desc = NULL; - uint8_t max_users = CDP_MU_MAX_USERS; - uint32_t tsf_l32; - - uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); - - length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); - - msg_word = msg_word + 1; - ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); - - msg_word = msg_word + 1; - tsf_l32 = (uint32_t)(*msg_word); - - msg_word = msg_word + 2; - while (length > 0) { - tlv_buf = (uint8_t *)msg_word; - tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); - tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word); - if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) - pdev->stats.ppdu_stats_counter[tlv_type]++; - - if (tlv_length == 0) - break; - - tlv_length += HTT_TLV_HDR_LEN; - - /** - * Not allocating separate ppdu descriptor for MGMT Payload - * TLV as this is sent as separate WDI indication and it - * doesn't contain any ppdu information - */ - if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { - pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf; - pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id; - pdev->mgmtctrl_frm_info.mgmt_buf_len = - HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET - (*(msg_word + 1)); - msg_word = - (uint32_t *)((uint8_t *)tlv_buf + tlv_length); - length -= (tlv_length); - continue; - } - - /* - * retrieve max_users if it's USERS_INFO, - * else, it's 1 for COMPLTN_FLUSH, - * else, use CDP_MU_MAX_USERS - */ - if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) { - max_users = - HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1)); - } else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) { - max_users = 1; - } - - ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type, - tsf_l32, max_users); - if (!ppdu_info) - return NULL; - - ppdu_info->ppdu_id = ppdu_id; - ppdu_info->tlv_bitmap |= (1 << tlv_type); - - dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); - - /** - * Increment pdev level tlv count to monitor - * missing TLVs - */ - pdev->tlv_count++; - ppdu_info->last_tlv_cnt = pdev->tlv_count; - msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); - length -= (tlv_length); - } - - if (!ppdu_info) - return NULL; - - pdev->last_ppdu_id = ppdu_id; - - tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; - - if (pdev->tx_sniffer_enable || pdev->mcopy_mode || - pdev->tx_capture_enabled) { - if (ppdu_info->is_ampdu) - tlv_bitmap_expected = - dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( - ppdu_info->tlv_bitmap); - } - - ppdu_desc = ppdu_info->ppdu_desc; - - if (!ppdu_desc) - return NULL; - - if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status != - HTT_PPDU_STATS_USER_STATUS_OK) { - tlv_bitmap_expected = tlv_bitmap_expected & 0xFF; - } - - /* - * for frame type DATA and BAR, we update stats based on MSDU, - * successful msdu and mpdu are populate from ACK BA STATUS TLV - * which comes out of order. successful mpdu also populated from - * COMPLTN COMMON TLV which comes in order. for every ppdu_info - * we store successful mpdu from both tlv and compare before delivering - * to make sure we received ACK BA STATUS TLV. For some self generated - * frame we won't get ack ba status tlv so no need to wait for - * ack ba status tlv. 
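/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * tag/length walk performed by dp_htt_process_tlv() above: each TLV starts
 * with a 32-bit header carrying a tag and a payload length, the running
 * bitmap records which tags were seen, and the cursor advances by payload
 * length plus the header size. The bit layout used here (tag in bits 0..11,
 * length in bits 16..31) is assumed for the sketch, not taken from htt.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_HDR_LEN_SKETCH 4u

static uint32_t tlv_tag(uint32_t hdr) { return hdr & 0xFFFu; }
static uint32_t tlv_len(uint32_t hdr) { return (hdr >> 16) & 0xFFFFu; }
static uint32_t tlv_hdr(uint32_t tag, uint32_t len)
{
	return (tag & 0xFFFu) | ((len & 0xFFFFu) << 16);
}

/* Walk a buffer of TLVs and return a bitmap of the tags encountered. */
static uint32_t tlv_walk(const uint8_t *buf, uint32_t length)
{
	uint32_t bitmap = 0;

	while (length >= TLV_HDR_LEN_SKETCH) {
		uint32_t hdr, step;

		memcpy(&hdr, buf, sizeof(hdr));
		if (tlv_len(hdr) == 0)
			break;
		if (tlv_tag(hdr) < 32)
			bitmap |= 1u << tlv_tag(hdr);
		step = tlv_len(hdr) + TLV_HDR_LEN_SKETCH;
		if (step > length)
			break;	/* malformed: TLV overruns the buffer */
		buf += step;
		length -= step;
	}
	return bitmap;
}

int main(void)
{
	uint8_t buf[24] = { 0 };
	uint32_t h0 = tlv_hdr(1, 4), h1 = tlv_hdr(5, 8);

	memcpy(buf, &h0, 4);		/* tag 1, 4-byte payload */
	memcpy(buf + 8, &h1, 4);	/* tag 5, 8-byte payload */
	printf("tlv bitmap: 0x%x\n", tlv_walk(buf, sizeof(buf)));
	return 0;
}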
- */ - if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL && - ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) { - /* - * most of the time bar frame will have duplicate ack ba - * status tlv - */ - if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR && - (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)) - return NULL; - /* - * For data frame, compltn common tlv should match ack ba status - * tlv and completion status. Reason we are checking first user - * for ofdma, completion seen at next MU BAR frm, for mimo - * only for first user completion will be immediate. - */ - if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && - (ppdu_desc->user[0].completion_status == 0 && - (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))) - return NULL; - } - - /** - * Once all the TLVs for a given PPDU has been processed, - * return PPDU status to be delivered to higher layer. - * tlv_bitmap_expected can't be available for different frame type. - * But SCHED CMD STATS TLV is the last TLV from the FW for a ppdu. - * apart from ACK BA TLV, FW sends other TLV in sequential order. - * flush tlv comes separate. - */ - if ((ppdu_info->tlv_bitmap != 0 && - (ppdu_info->tlv_bitmap & - (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) || - (ppdu_info->tlv_bitmap & - (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) { - ppdu_info->done = 1; - return ppdu_info; - } - - return NULL; -} -#endif /* FEATURE_PERPKT_INFO */ - -/** - * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW - * @soc: DP SOC handle - * @pdev_id: pdev id - * @htt_t2h_msg: HTT message nbuf - * - * return:void - */ -#if defined(WDI_EVENT_ENABLE) -#ifdef FEATURE_PERPKT_INFO -static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, - uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) -{ - struct dp_pdev *pdev = soc->pdev_list[pdev_id]; - struct ppdu_info *ppdu_info = NULL; - bool free_buf = true; - - if (pdev_id >= MAX_PDEV_CNT) - return true; - - pdev = soc->pdev_list[pdev_id]; - if (!pdev) - return true; - - if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && - !pdev->mcopy_mode && !pdev->bpr_enable) - return free_buf; - - qdf_spin_lock_bh(&pdev->ppdu_stats_lock); - ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg); - - if (pdev->mgmtctrl_frm_info.mgmt_buf) { - if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv - (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) != - QDF_STATUS_SUCCESS) - free_buf = false; - } - - if (ppdu_info) - dp_ppdu_desc_deliver(pdev, ppdu_info); - - pdev->mgmtctrl_frm_info.mgmt_buf = NULL; - pdev->mgmtctrl_frm_info.mgmt_buf_len = 0; - pdev->mgmtctrl_frm_info.ppdu_id = 0; - - qdf_spin_unlock_bh(&pdev->ppdu_stats_lock); - - return free_buf; -} -#else -static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, - uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) -{ - return true; -} -#endif -#endif - /** * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats * @soc: DP SOC handle @@ -4264,13 +1935,13 @@ static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, * return:void */ static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc, - qdf_nbuf_t htt_t2h_msg) + qdf_nbuf_t htt_t2h_msg) { uint8_t done; qdf_nbuf_t msg_copy; uint32_t *msg_word; - msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg); + msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg); msg_word = msg_word + 3; done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word); @@ -4290,7 +1961,7 @@ static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc, if (!msg_copy) { QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO, - "T2H 
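/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * completion test at the end of dp_htt_process_tlv() above: a PPDU is
 * considered fully described either when the scheduler command status TLV
 * has arrived (the last in-order TLV from firmware) or when a completion
 * flush TLV shows up. The two bit positions below are assumed for the
 * sketch, not copied from htt_ppdu_stats.h.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCH_CMD_STATUS_TLV_BIT_SKETCH	20u	/* assumed tag position */
#define COMPLTN_FLUSH_TLV_BIT_SKETCH	14u	/* assumed tag position */

static bool ppdu_tlvs_complete(uint32_t tlv_bitmap)
{
	if (tlv_bitmap & (1u << SCH_CMD_STATUS_TLV_BIT_SKETCH))
		return true;
	return (tlv_bitmap & (1u << COMPLTN_FLUSH_TLV_BIT_SKETCH)) != 0;
}

int main(void)
{
	printf("%d %d\n",
	       ppdu_tlvs_complete(1u << SCH_CMD_STATUS_TLV_BIT_SKETCH),
	       ppdu_tlvs_complete(1u << 3));
	return 0;
}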
messge clone failed for HTT EXT STATS"); + "T2H messge clone failed for HTT EXT STATS"); goto error; } @@ -4383,46 +2054,6 @@ struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle) return htt_soc; } -#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) -/* - * dp_ppdu_stats_ind_handler() - PPDU stats msg handler - * @htt_soc: HTT SOC handle - * @msg_word: Pointer to payload - * @htt_t2h_msg: HTT msg nbuf - * - * Return: True if buffer should be freed by caller. - */ -static bool -dp_ppdu_stats_ind_handler(struct htt_soc *soc, - uint32_t *msg_word, - qdf_nbuf_t htt_t2h_msg) -{ - u_int8_t pdev_id; - u_int8_t target_pdev_id; - bool free_buf; - - target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); - pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, - target_pdev_id); - dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, - htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, - pdev_id); - - free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, - htt_t2h_msg); - - return free_buf; -} -#else -static bool -dp_ppdu_stats_ind_handler(struct htt_soc *soc, - uint32_t *msg_word, - qdf_nbuf_t htt_t2h_msg) -{ - return true; -} -#endif - #if defined(WDI_EVENT_ENABLE) && \ !defined(REMOVE_PKT_LOG) /* @@ -4953,8 +2584,8 @@ static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt) case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: { - free_buf = dp_ppdu_stats_ind_handler(soc, msg_word, - htt_t2h_msg); + free_buf = monitor_ppdu_stats_ind_handler(soc, msg_word, + htt_t2h_msg); break; } diff --git a/dp/wifi3.0/dp_internal.h b/dp/wifi3.0/dp_internal.h index d3606db24b..7f1a9bd981 100644 --- a/dp/wifi3.0/dp_internal.h +++ b/dp/wifi3.0/dp_internal.h @@ -212,11 +212,194 @@ static inline QDF_STATUS monitor_config_debug_sniffer(struct dp_pdev *pdev, { return QDF_STATUS_E_FAILURE; } + +static inline void monitor_flush_rings(struct dp_soc *soc) +{ +} + +static inline QDF_STATUS monitor_htt_srng_setup(struct dp_soc *soc, + struct dp_pdev *pdev, + int mac_id, + int mac_for_pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void monitor_service_mon_rings(struct dp_soc *soc, uint32_t quota) +{ +} + +static inline +uint32_t monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, + uint32_t mac_id, uint32_t quota) +{ + return 0; +} + +static inline +uint32_t monitor_drop_packets_for_mac(struct dp_pdev *pdev, + uint32_t mac_id, uint32_t quota) +{ + return 0; +} + +static inline void monitor_peer_tx_init(struct dp_pdev *pdev, + struct dp_peer *peer) +{ +} + +static inline void monitor_peer_tx_cleanup(struct dp_vdev *vdev, + struct dp_peer *peer) +{ +} + +static inline +void monitor_peer_tid_peer_id_update(struct dp_soc *soc, + struct dp_peer *peer, + uint16_t peer_id) +{ +} + +static inline void monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) +{ +} + +static inline void monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) +{ +} + +static inline QDF_STATUS monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static inline void monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, + struct dp_peer *peer) +{ +} + +static inline +QDF_STATUS monitor_tx_add_to_comp_queue(struct dp_soc *soc, + struct dp_tx_desc_s *desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer) +{ + return QDF_STATUS_E_FAILURE; +} + +static inline bool monitor_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + return true; +} + +static inline QDF_STATUS monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev) +{ 
+ return QDF_STATUS_SUCCESS; +} + +static inline void monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev) +{ +} + +static inline void monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev) +{ +} + +static inline QDF_STATUS monitor_config_enh_tx_capture(struct dp_pdev *pdev, + uint32_t val) +{ + return QDF_STATUS_E_INVAL; +} + +static inline QDF_STATUS monitor_config_enh_rx_capture(struct dp_pdev *pdev, + uint32_t val) +{ + return QDF_STATUS_E_INVAL; +} + +static inline QDF_STATUS monitor_set_bpr_enable(struct dp_pdev *pdev, + uint32_t val) +{ + return QDF_STATUS_E_FAILURE; +} + +static inline int monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val) +{ + return 0; +} + +static inline +void monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value) +{ +} + +static inline +void monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) +{ +} + +static inline +bool monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) +{ + return false; +} + +static inline +bool monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) +{ + return false; +} + +static inline +bool monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) +{ + return false; +} + +static inline +int monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, bool enable) +{ + return 0; +} + +static inline void monitor_pktlogmod_exit(struct dp_pdev *pdev) +{ +} + +static inline +void monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) +{ +} + +static inline +void monitor_neighbour_peers_detach(struct dp_pdev *pdev) +{ +} + +static inline QDF_STATUS monitor_filter_neighbour_peer(struct dp_pdev *pdev, + uint8_t *rx_pkt_hdr) +{ + return QDF_STATUS_E_FAILURE; +} + +static inline void monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev) +{ +} #endif -#ifdef WLAN_TX_PKT_CAPTURE_ENH -extern uint8_t -dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX]; +#ifndef WIFI_MONITOR_SUPPORT +static inline QDF_STATUS monitor_drop_inv_peer_pkts(struct dp_vdev *vdev, + struct ieee80211_frame *wh) +{ + return QDF_STATUS_E_FAILURE; +} + +static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) +{ + return false; +} #endif #define DP_MAX_TIMER_EXEC_TIME_TICKS \ @@ -1134,7 +1317,6 @@ void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer); void dp_peer_ppdu_delayed_ba_cleanup(struct dp_peer *peer); extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer); -void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer); void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer); extern struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc, @@ -1514,15 +1696,6 @@ dp_print_pdev_tx_stats(struct dp_pdev *pdev); void dp_print_pdev_rx_stats(struct dp_pdev *pdev); -/** - * dp_print_pdev_rx_mon_stats(): Print Pdev level RX monitor stats - * @pdev: DP_PDEV Handle - * - * Return: void - */ -void -dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev); - /** * dp_print_soc_tx_stats(): Print SOC level stats * @soc DP_SOC Handle @@ -1772,8 +1945,6 @@ void dp_wdi_event_handler(enum WDI_EVENT event, struct dp_soc *soc, int dp_wdi_event_attach(struct dp_pdev *txrx_pdev); int dp_wdi_event_detach(struct dp_pdev *txrx_pdev); -int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, - bool enable); /** * dp_get_pldev() - function to get pktlog device handle @@ -1783,7 +1954,6 @@ int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, * Return: pktlog device handle or NULL */ void 
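/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * pattern the dp_internal.h hunk above relies on: when the monitor feature is
 * compiled out, each monitor_*() entry point is provided as a static inline
 * no-op returning a safe default, so callers in the core datapath need no
 * #ifdef at the call site. The feature macro and function name below are
 * hypothetical.
 */
#include <stdio.h>

/* #define FEATURE_XYZ_SUPPORT 1 */	/* normally toggled by the build system */

#ifdef FEATURE_XYZ_SUPPORT
int feature_xyz_process(int quota);	/* real implementation elsewhere */
#else
static inline int feature_xyz_process(int quota)
{
	(void)quota;
	return 0;	/* no work done when the feature is compiled out */
}
#endif

int main(void)
{
	/* The caller is identical whether or not the feature is built in. */
	printf("processed %d\n", feature_xyz_process(32));
	return 0;
}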
*dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); -void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn); static inline void dp_hif_update_pipe_callback(struct dp_soc *dp_soc, @@ -1804,8 +1974,6 @@ dp_hif_update_pipe_callback(struct dp_soc *dp_soc, DP_HTT_T2H_HP_PIPE, &hif_pipe_callbacks); } -QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, struct dp_peer *peer); - QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev, struct cdp_rx_stats_ppdu_user *ppdu_user); #else @@ -1841,22 +2009,12 @@ static inline int dp_wdi_event_detach(struct dp_pdev *txrx_pdev) return 0; } -static inline int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, - bool enable) -{ - return 0; -} static inline QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev, uint32_t stats_type_upload_mask, uint8_t mac_id) { return 0; } -static inline void -dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn) -{ -} - static inline void dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, QDF_STATUS (*callback)(void *, qdf_nbuf_t, uint8_t), @@ -1864,11 +2022,6 @@ dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context, { } -static inline QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, - struct dp_peer *peer) -{ - return QDF_STATUS_SUCCESS; -} static inline QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev, @@ -2074,115 +2227,6 @@ void dp_pdev_print_tid_stats(struct dp_pdev *pdev); void dp_soc_set_txrx_ring_map(struct dp_soc *soc); -#ifndef WLAN_TX_PKT_CAPTURE_ENH -/** - * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture - * @pdev: DP PDEV - * - * Return: none - */ -static inline void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev) -{ -} - -/** - * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture - * @pdev: DP PDEV - * - * Return: none - */ -static inline void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev) -{ -} - -/** - * dp_tx_ppdu_stats_process - Deferred PPDU stats handler - * @context: Opaque work context (PDEV) - * - * Return: none - */ -static inline void dp_tx_ppdu_stats_process(void *context) -{ -} - -/** - * dp_tx_add_to_comp_queue() - add completion msdu to queue - * @soc: DP Soc handle - * @tx_desc: software Tx descriptor - * @ts : Tx completion status from HAL/HTT descriptor - * @peer: DP peer - * - * Return: none - */ -static inline -QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc, - struct dp_tx_desc_s *desc, - struct hal_tx_completion_status *ts, - struct dp_peer *peer) -{ - return QDF_STATUS_E_FAILURE; -} - -/* - * dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type - * pdev: DP pdev handle - * htt_frame_type: htt frame type received from fw - * - * return: void - */ -static inline -void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev, - uint32_t htt_frame_type) -{ -} - -/* - * dp_tx_cature_stats: print tx capture stats - * @pdev: DP PDEV handle - * - * return: void - */ -static inline -void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev) -{ -} - -/* - * dp_peer_tx_capture_filter_check: check filter is enable for the filter - * and update tx_cap_enabled flag - * @pdev: DP PDEV handle - * @peer: DP PEER handle - * - * return: void - */ -static inline -void dp_peer_tx_capture_filter_check(struct dp_pdev *pdev, - struct dp_peer *peer) -{ -} - -/* - * dp_tx_capture_debugfs_init: tx capture debugfs init - * @pdev: DP PDEV handle - * - * return: QDF_STATUS - */ -static inline -QDF_STATUS dp_tx_capture_debugfs_init(struct dp_pdev *pdev) 
-{ - return QDF_STATUS_E_FAILURE; -} -#endif - -#ifdef FEATURE_PERPKT_INFO -void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf); -#else -static inline -void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) -{ -} -#endif - /** * dp_vdev_to_cdp_vdev() - typecast dp vdev to cdp vdev * @vdev: DP vdev handle diff --git a/dp/wifi3.0/dp_main.c b/dp/wifi3.0/dp_main.c index 4fea0e94f7..39162b31bb 100644 --- a/dp/wifi3.0/dp_main.c +++ b/dp/wifi3.0/dp_main.c @@ -163,40 +163,6 @@ QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs, WLAN_CFG_INT_NUM_CONTEXTS_MAX >= WLAN_CFG_INT_NUM_CONTEXTS); -#ifdef WLAN_RX_PKT_CAPTURE_ENH -#include "dp_rx_mon_feature.h" -#else -/* - * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture - * @pdev_handle: DP_PDEV handle - * @val: user provided value - * - * Return: QDF_STATUS - */ -static QDF_STATUS -dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val) -{ - return QDF_STATUS_E_INVAL; -} -#endif /* WLAN_RX_PKT_CAPTURE_ENH */ - -#ifdef WLAN_TX_PKT_CAPTURE_ENH -#include "dp_tx_capture.h" -#else -/* - * dp_config_enh_tx_capture()- API to enable/disable enhanced tx capture - * @pdev_handle: DP_PDEV handle - * @val: user provided value - * - * Return: QDF_STATUS - */ -static QDF_STATUS -dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val) -{ - return QDF_STATUS_E_INVAL; -} -#endif - static void dp_pdev_srng_deinit(struct dp_pdev *pdev); static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev); static void dp_pdev_srng_free(struct dp_pdev *pdev); @@ -242,7 +208,6 @@ dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops, uint16_t device_id); -void dp_pktlogmod_exit(struct dp_pdev *handle); static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, uint8_t *peer_mac_addr); @@ -256,10 +221,6 @@ bool is_dp_verbose_debug_enabled; #endif #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) -static void dp_cfr_filter(struct cdp_soc_t *soc_hdl, - uint8_t pdev_id, - bool enable, - struct cdp_monitor_filter *filter_val); static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable); @@ -268,24 +229,17 @@ dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, struct cdp_cfr_rcc_stats *cfr_rcc_stats); static inline void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id); -static inline void -dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, - bool enable); #endif + static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index); static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index); static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index); static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index); -static inline bool -dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev); static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num); -QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev, - uint8_t delayed_replenish); -static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev); #define DP_INTR_POLL_TIMER_MS 5 @@ -464,30 +418,6 @@ uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx return 0; } -/* - * dp_service_mon_rings()- service monitor rings - * @soc: soc dp handle - * @quota: number of ring entry that 
can be serviced - * - * Return: None - * - */ -static void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota) -{ - int ring = 0, work_done; - struct dp_pdev *pdev = NULL; - - for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) { - pdev = dp_get_pdev_for_lmac_id(soc, ring); - if (!pdev) - continue; - work_done = dp_mon_process(soc, NULL, ring, quota); - - dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings", - work_done); - } -} - /* * dp_mon_reap_timer_handler()- timer to reap monitor rings * reqd as we are not getting ppdu end interrupts @@ -500,96 +430,11 @@ static void dp_mon_reap_timer_handler(void *arg) { struct dp_soc *soc = (struct dp_soc *)arg; - dp_service_mon_rings(soc, QCA_NAPI_BUDGET); + monitor_service_mon_rings(soc, QCA_NAPI_BUDGET); qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS); } -#ifndef REMOVE_PKT_LOG -/** - * dp_pkt_log_init() - API to initialize packet log - * @soc_hdl: Datapath soc handle - * @pdev_id: id of data path pdev handle - * @scn: HIF context - * - * Return: none - */ -void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn) -{ - struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); - struct dp_pdev *handle = - dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); - - if (!handle) { - dp_err("pdev handle is NULL"); - return; - } - - if (handle->pkt_log_init) { - dp_init_err("%pK: Packet log not initialized", soc); - return; - } - - pktlog_sethandle(&handle->pl_dev, scn); - pktlog_set_pdev_id(handle->pl_dev, pdev_id); - pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION); - - if (pktlogmod_init(scn)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "%s: pktlogmod_init failed", __func__); - handle->pkt_log_init = false; - } else { - handle->pkt_log_init = true; - } -} - -/** - * dp_pkt_log_con_service() - connect packet log service - * @soc_hdl: Datapath soc handle - * @pdev_id: id of data path pdev handle - * @scn: device context - * - * Return: none - */ -static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, - uint8_t pdev_id, void *scn) -{ - dp_pkt_log_init(soc_hdl, pdev_id, scn); - pktlog_htc_attach(); -} - -/** - * dp_pktlogmod_exit() - API to cleanup pktlog info - * @pdev: Pdev handle - * - * Return: none - */ -void dp_pktlogmod_exit(struct dp_pdev *pdev) -{ - struct dp_soc *soc = pdev->soc; - struct hif_opaque_softc *scn = soc->hif_handle; - - if (!scn) { - dp_err("Invalid hif(scn) handle"); - return; - } - - /* stop mon_reap_timer if it has been started */ - if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED && - soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev))) - qdf_timer_sync_cancel(&soc->mon_reap_timer); - - pktlogmod_exit(scn); - pdev->pkt_log_init = false; -} -#else -static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, - uint8_t pdev_id, void *scn) -{ -} - -void dp_pktlogmod_exit(struct dp_pdev *handle) { } -#endif /** * dp_get_num_rx_contexts() - get number of RX contexts * @soc_hdl: cdp opaque soc handle @@ -611,7 +456,6 @@ static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl) } #else -void dp_pktlogmod_exit(struct dp_pdev *handle) { } /** * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode @@ -652,8 +496,8 @@ static void dp_service_lmac_rings(void *arg) rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev]; - dp_mon_process(soc, NULL, mac_for_pdev, - QCA_NAPI_BUDGET); + monitor_process(soc, NULL, mac_for_pdev, + QCA_NAPI_BUDGET); for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) @@ -2408,8 +2252,8 @@ static 
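/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * quota-driven reap loop that the removed dp_service_mon_rings() and its
 * monitor_service_mon_rings() replacement implement: each LMAC monitor ring
 * is polled with a fixed per-call quota so one busy ring cannot monopolize
 * the timer context. Ring count and quota values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_LMAC_RINGS_SKETCH	2
#define REAP_QUOTA_SKETCH	64u

/* Stand-in for the per-ring processing routine; returns entries reaped. */
static uint32_t ring_process(int ring, uint32_t quota)
{
	uint32_t pending = (ring == 0) ? 100u : 10u;	/* pretend backlog */

	return pending < quota ? pending : quota;
}

static void service_mon_rings(uint32_t quota)
{
	int ring;

	for (ring = 0; ring < NUM_LMAC_RINGS_SKETCH; ring++) {
		uint32_t work_done = ring_process(ring, quota);

		printf("ring %d: reaped %u descriptors\n", ring, work_done);
	}
}

int main(void)
{
	/* A periodic reap timer would normally invoke this and then re-arm. */
	service_mon_rings(REAP_QUOTA_SKETCH);
	return 0;
}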
int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget) if (!pdev) continue; if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) { - work_done = dp_mon_process(soc, int_ctx, mac_for_pdev, - remaining_quota); + work_done = monitor_process(soc, int_ctx, mac_for_pdev, + remaining_quota); if (work_done) intr_stats->num_rx_mon_ring_masks++; budget -= work_done; @@ -2695,14 +2539,14 @@ static void dp_mon_vdev_timer(void *arg) while (yield == DP_TIMER_NO_YIELD) { for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) { if (lmac_iter == lmac_id) - work_done = dp_mon_process( + work_done = monitor_process( soc, NULL, lmac_iter, remaining_quota); else work_done = - dp_mon_drop_packets_for_mac(pdev, - lmac_iter, - remaining_quota); + monitor_drop_packets_for_mac(pdev, + lmac_iter, + remaining_quota); if (work_done) { budget -= work_done; if (budget <= 0) { @@ -2781,13 +2625,15 @@ static void dp_interrupt_timer(void *arg) while (yield == DP_TIMER_NO_YIELD) { for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) { if (lmac_iter == lmac_id) - work_done = dp_mon_process(soc, - &soc->intr_ctx[dp_intr_id], - lmac_iter, remaining_quota); + work_done = + monitor_process(soc, + &soc->intr_ctx[dp_intr_id], + lmac_iter, remaining_quota); else - work_done = dp_mon_drop_packets_for_mac(pdev, - lmac_iter, - remaining_quota); + work_done = + monitor_drop_packets_for_mac(pdev, + lmac_iter, + remaining_quota); if (work_done) { budget -= work_done; if (budget <= 0) { @@ -4794,25 +4640,6 @@ void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl) } #endif -/* - * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing - * @pdev: Datapath PDEV handle - * - * Return: QDF_STATUS_SUCCESS: Success - * QDF_STATUS_E_NOMEM: Error - */ -QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev) -{ - pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE); - - if (!pdev->ppdu_tlv_buf) { - QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail"); - return QDF_STATUS_E_NOMEM; - } - - return QDF_STATUS_SUCCESS; -} - #ifdef DP_TX_HW_DESC_HISTORY /** * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history @@ -5121,67 +4948,6 @@ static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev) } #endif -/* - * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients) - * @pdev: device object - * - * Return: void - */ -void dp_neighbour_peers_detach(struct dp_pdev *pdev) -{ - struct dp_neighbour_peer *peer = NULL; - struct dp_neighbour_peer *temp_peer = NULL; - - TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list, - neighbour_peer_list_elem, temp_peer) { - /* delete this peer from the list */ - TAILQ_REMOVE(&pdev->neighbour_peers_list, - peer, neighbour_peer_list_elem); - qdf_mem_free(peer); - } - - qdf_spinlock_destroy(&pdev->neighbour_peer_mutex); -} - -/** -* dp_htt_ppdu_stats_detach() - detach stats resources -* @pdev: Datapath PDEV handle -* -* Return: void -*/ -void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) -{ - struct ppdu_info *ppdu_info, *ppdu_info_next; - - TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list, - ppdu_info_list_elem, ppdu_info_next) { - if (!ppdu_info) - break; - TAILQ_REMOVE(&pdev->ppdu_info_list, - ppdu_info, ppdu_info_list_elem); - pdev->list_depth--; - qdf_assert_always(ppdu_info->nbuf); - qdf_nbuf_free(ppdu_info->nbuf); - qdf_mem_free(ppdu_info); - } - - TAILQ_FOREACH_SAFE(ppdu_info, &pdev->sched_comp_ppdu_list, - ppdu_info_list_elem, ppdu_info_next) { - if (!ppdu_info) - break; - 
TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, - ppdu_info, ppdu_info_list_elem); - pdev->sched_comp_list_depth--; - qdf_assert_always(ppdu_info->nbuf); - qdf_nbuf_free(ppdu_info->nbuf); - qdf_mem_free(ppdu_info); - } - - if (pdev->ppdu_tlv_buf) - qdf_mem_free(pdev->ppdu_tlv_buf); - -} - #ifdef WLAN_DP_PENDING_MEM_FLUSH /** * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev @@ -5313,7 +5079,7 @@ static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev) { struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; - dp_tx_capture_debugfs_init(pdev); + monitor_tx_capture_debugfs_init(pdev); if (dp_pdev_htt_stats_dbgfs_init(pdev)) { dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc); @@ -5653,88 +5419,6 @@ static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc) dp_soc_detach(txrx_soc); } -#if !defined(DISABLE_MON_CONFIG) -/** - * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings - * @soc: soc handle - * @pdev: physical device handle - * @mac_id: ring number - * @mac_for_pdev: mac_id - * - * Return: non-zero for failure, zero for success - */ -static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc, - struct dp_pdev *pdev, - int mac_id, - int mac_for_pdev) -{ - QDF_STATUS status = QDF_STATUS_SUCCESS; - - if (soc->wlan_cfg_ctx->rxdma1_enable) { - status = htt_srng_setup(soc->htt_handle, mac_for_pdev, - soc->rxdma_mon_buf_ring[mac_id] - .hal_srng, - RXDMA_MONITOR_BUF); - - if (status != QDF_STATUS_SUCCESS) { - dp_err("Failed to send htt srng setup message for Rxdma mon buf ring"); - return status; - } - - status = htt_srng_setup(soc->htt_handle, mac_for_pdev, - soc->rxdma_mon_dst_ring[mac_id] - .hal_srng, - RXDMA_MONITOR_DST); - - if (status != QDF_STATUS_SUCCESS) { - dp_err("Failed to send htt srng setup message for Rxdma mon dst ring"); - return status; - } - - status = htt_srng_setup(soc->htt_handle, mac_for_pdev, - soc->rxdma_mon_status_ring[mac_id] - .hal_srng, - RXDMA_MONITOR_STATUS); - - if (status != QDF_STATUS_SUCCESS) { - dp_err("Failed to send htt srng setup message for Rxdma mon status ring"); - return status; - } - - status = htt_srng_setup(soc->htt_handle, mac_for_pdev, - soc->rxdma_mon_desc_ring[mac_id] - .hal_srng, - RXDMA_MONITOR_DESC); - - if (status != QDF_STATUS_SUCCESS) { - dp_err("Failed to send htt srng message for Rxdma mon desc ring"); - return status; - } - } else { - status = htt_srng_setup(soc->htt_handle, mac_for_pdev, - soc->rxdma_mon_status_ring[mac_id] - .hal_srng, - RXDMA_MONITOR_STATUS); - - if (status != QDF_STATUS_SUCCESS) { - dp_err("Failed to send htt srng setup message for Rxdma mon status ring"); - return status; - } - } - - return status; - -} -#else -static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc, - struct dp_pdev *pdev, - int mac_id, - int mac_for_pdev) -{ - return QDF_STATUS_SUCCESS; -} -#endif - /* * dp_rxdma_ring_config() - configure the RX DMA rings * @@ -5823,9 +5507,9 @@ static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc) RXDMA_DST); /* Configure monitor mode rings */ - status = dp_mon_htt_srng_setup(soc, pdev, - lmac_id, - mac_for_pdev); + status = monitor_htt_srng_setup(soc, pdev, + lmac_id, + mac_for_pdev); if (status != QDF_STATUS_SUCCESS) { dp_err("Failed to send htt monitor messages to target"); return status; @@ -6267,7 +5951,7 @@ static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc, dp_vdev_id_map_tbl_add(soc, vdev, vdev_id); if (wlan_op_mode_monitor == vdev->opmode) { - dp_vdev_set_monitor_mode_buf_rings(pdev); + 
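/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * shape of the removed dp_mon_htt_srng_setup(): several monitor rings are
 * registered with the target one after another, and the first failure aborts
 * the sequence and is propagated to the caller; the reduced configuration
 * sets up only the status ring. Ring identifiers are hypothetical.
 */
#include <stdio.h>

enum ring_id_sketch {
	RING_MON_BUF_SKETCH,
	RING_MON_DST_SKETCH,
	RING_MON_STATUS_SKETCH,
	RING_MON_DESC_SKETCH,
	RING_MAX_SKETCH
};

/* Stand-in for the per-ring HTT setup message; 0 means success. */
static int htt_ring_setup_sketch(enum ring_id_sketch ring)
{
	printf("setup ring %d\n", ring);
	return 0;
}

static int mon_rings_setup_sketch(int full_mon_rings)
{
	int status;

	if (full_mon_rings) {
		enum ring_id_sketch ring;

		for (ring = RING_MON_BUF_SKETCH; ring < RING_MAX_SKETCH;
		     ring++) {
			status = htt_ring_setup_sketch(ring);
			if (status != 0)
				return status;	/* abort on first failure */
		}
		return 0;
	}
	/* Reduced configuration: only the status ring is needed. */
	return htt_ring_setup_sketch(RING_MON_STATUS_SKETCH);
}

int main(void)
{
	return mon_rings_setup_sketch(1);
}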
monitor_vdev_set_monitor_mode_buf_rings(pdev); pdev->monitor_vdev = vdev; return QDF_STATUS_SUCCESS; } @@ -6717,7 +6401,7 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, * when unassociated peer get associated peer need to * update tx_cap_enabled flag to support peer filter. */ - dp_peer_tx_capture_filter_check(pdev, peer); + monitor_peer_tx_capture_filter_check(pdev, peer); dp_set_peer_isolation(peer, false); @@ -6861,7 +6545,7 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, * when unassociated peer get associated peer need to * update tx_cap_enabled flag to support peer filter. */ - dp_peer_tx_capture_filter_check(pdev, peer); + monitor_peer_tx_capture_filter_check(pdev, peer); dp_set_peer_isolation(peer, false); @@ -7038,7 +6722,7 @@ dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, * which is REO2TCL ring. for this reason we should * not setup reo_queues and default route for bss_peer. */ - dp_peer_tx_init(pdev, peer); + monitor_peer_tx_init(pdev, peer); if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) { status = QDF_STATUS_E_FAILURE; goto fail; @@ -7201,139 +6885,6 @@ dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id) return cdp_host_reo_dest_ring_unknown; } -#ifdef ATH_SUPPORT_NAC -/* - * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh - * @pdev_handle: device object - * @val: value to be set - * - * Return: void - */ -static int dp_set_filter_neigh_peers(struct dp_pdev *pdev, - bool val) -{ - /* Enable/Disable smart mesh filtering. This flag will be checked - * during rx processing to check if packets are from NAC clients. - */ - pdev->filter_neighbour_peers = val; - return 0; -} -#else -static int dp_set_filter_neigh_peers(struct dp_pdev *pdev, - bool val) -{ - return 0; -} -#endif /* ATH_SUPPORT_NAC */ - -#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) -/* - * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients) - * address for smart mesh filtering - * @txrx_soc: cdp soc handle - * @vdev_id: id of virtual device object - * @cmd: Add/Del command - * @macaddr: nac client mac address - * - * Return: success/failure - */ -static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl, - uint8_t vdev_id, - uint32_t cmd, uint8_t *macaddr) -{ - struct dp_soc *soc = (struct dp_soc *)soc_hdl; - struct dp_pdev *pdev; - struct dp_neighbour_peer *peer = NULL; - struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, - DP_MOD_ID_CDP); - - if (!vdev || !macaddr) - goto fail0; - - pdev = vdev->pdev; - - if (!pdev) - goto fail0; - - /* Store address of NAC (neighbour peer) which will be checked - * against TA of received packets. 
- */ - if (cmd == DP_NAC_PARAM_ADD) { - peer = (struct dp_neighbour_peer *) qdf_mem_malloc( - sizeof(*peer)); - - if (!peer) { - dp_cdp_err("%pK: DP neighbour peer node memory allocation failed" - , soc); - goto fail0; - } - - qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0], - macaddr, QDF_MAC_ADDR_SIZE); - peer->vdev = vdev; - - qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); - - /* add this neighbour peer into the list */ - TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer, - neighbour_peer_list_elem); - qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); - - /* first neighbour */ - if (!pdev->neighbour_peers_added) { - QDF_STATUS status = QDF_STATUS_SUCCESS; - - pdev->neighbour_peers_added = true; - - dp_mon_filter_setup_smart_monitor(pdev); - status = dp_mon_filter_update(pdev); - if (status != QDF_STATUS_SUCCESS) { - dp_cdp_err("%pK: smart mon filter setup failed", - soc); - dp_mon_filter_reset_smart_monitor(pdev); - pdev->neighbour_peers_added = false; - } - } - - } else if (cmd == DP_NAC_PARAM_DEL) { - qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); - TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, - neighbour_peer_list_elem) { - if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], - macaddr, QDF_MAC_ADDR_SIZE)) { - /* delete this peer from the list */ - TAILQ_REMOVE(&pdev->neighbour_peers_list, - peer, neighbour_peer_list_elem); - qdf_mem_free(peer); - break; - } - } - /* last neighbour deleted */ - if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) { - QDF_STATUS status = QDF_STATUS_SUCCESS; - - dp_mon_filter_reset_smart_monitor(pdev); - status = dp_mon_filter_update(pdev); - if (status != QDF_STATUS_SUCCESS) { - dp_cdp_err("%pK: smart mon filter clear failed", - soc); - } - pdev->neighbour_peers_added = false; - - } - - qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); - } - dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); - return 1; - -fail0: - if (vdev) - dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); - return 0; -} -#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ - #ifdef WLAN_SUPPORT_SCS /* * dp_enable_scs_params - Enable/Disable SCS procedures @@ -7600,45 +7151,6 @@ dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, return status; } -static void dp_flush_monitor_rings(struct dp_soc *soc) -{ - struct dp_pdev *pdev = soc->pdev_list[0]; - hal_soc_handle_t hal_soc = soc->hal_soc; - uint32_t lmac_id; - uint32_t hp, tp; - uint8_t dp_intr_id; - int budget; - void *mon_dst_srng; - - /* Reset monitor filters before reaping the ring*/ - qdf_spin_lock_bh(&pdev->mon_lock); - dp_mon_filter_reset_mon_mode(pdev); - if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) - dp_info("failed to reset monitor filters"); - qdf_spin_unlock_bh(&pdev->mon_lock); - - if (pdev->mon_chan_band == REG_BAND_UNKNOWN) - return; - - lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band]; - if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID)) - return; - - dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id]; - mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id); - - /* reap full ring */ - budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx); - - hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp); - dp_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp); - - dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget); - - hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp); - dp_info("After reap: Monitor DST ring HP %u TP %u", hp, tp); -} - /** * dp_vdev_unref_delete() - check and process vdev delete * @soc : DP specific soc pointer @@ -7679,11 +7191,11 @@ void 
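/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * behaviour removed above in dp_update_filter_neighbour_peers(): neighbour
 * (NAC) client MAC addresses are kept in a list that is matched against the
 * transmitter address of received frames, and the smart-monitor filter is
 * turned on when the first entry is added and off when the last one is
 * removed. A fixed-size array stands in for the driver's TAILQ here.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAC_LEN		6
#define MAX_NAC_SKETCH	8

struct nac_table_sketch {
	unsigned char mac[MAX_NAC_SKETCH][MAC_LEN];
	int count;
	bool filter_enabled;
};

static bool nac_add(struct nac_table_sketch *t, const unsigned char *mac)
{
	if (t->count >= MAX_NAC_SKETCH)
		return false;
	memcpy(t->mac[t->count++], mac, MAC_LEN);
	if (t->count == 1)
		t->filter_enabled = true;	/* first neighbour: enable filter */
	return true;
}

static bool nac_del(struct nac_table_sketch *t, const unsigned char *mac)
{
	int i;

	for (i = 0; i < t->count; i++) {
		if (memcmp(t->mac[i], mac, MAC_LEN) != 0)
			continue;
		if (i != t->count - 1)	/* keep the table compact */
			memcpy(t->mac[i], t->mac[t->count - 1], MAC_LEN);
		t->count--;
		if (t->count == 0)
			t->filter_enabled = false;	/* last neighbour removed */
		return true;
	}
	return false;
}

int main(void)
{
	struct nac_table_sketch t = { .count = 0, .filter_enabled = false };
	const unsigned char client[MAC_LEN] = { 0, 0x11, 0x22, 0x33, 0x44, 0x55 };

	nac_add(&t, client);
	printf("after add: count=%d filter=%d\n", t.count, t.filter_enabled);
	nac_del(&t, client);
	printf("after del: count=%d filter=%d\n", t.count, t.filter_enabled);
	return 0;
}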
dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev, if (wlan_op_mode_monitor == vdev->opmode) { if (soc->intr_mode == DP_INTR_POLL) { qdf_timer_sync_cancel(&soc->int_timer); - dp_flush_monitor_rings(soc); + monitor_flush_rings(soc); } else if (soc->intr_mode == DP_INTR_MSI && soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) { qdf_timer_sync_cancel(&soc->mon_vdev_timer); - dp_flush_monitor_rings(soc); + monitor_flush_rings(soc); soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING; } pdev->monitor_vdev = NULL; @@ -7956,25 +7468,6 @@ static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, return QDF_STATUS_SUCCESS; } -/* - * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode - * @soc_hdl: datapath soc handle - * @pdev_id: physical device instance id - * - * Return: virtual interface id - */ -static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl, - uint8_t pdev_id) -{ - struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); - struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); - - if (qdf_unlikely(!pdev || !pdev->monitor_vdev)) - return -EINVAL; - - return pdev->monitor_vdev->vdev_id; -} - static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id) { struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); @@ -8046,41 +7539,6 @@ static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3( } /** - * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer - * ring based on target - * @soc: soc handle - * @mac_for_pdev: WIN- pdev_id, MCL- mac id - * @pdev: physical device handle - * @ring_num: mac id - * @htt_tlv_filter: tlv filter - * - * Return: zero on success, non-zero on failure - */ -static inline -QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev, - struct dp_pdev *pdev, uint8_t ring_num, - struct htt_rx_ring_tlv_filter htt_tlv_filter) -{ - QDF_STATUS status; - - if (soc->wlan_cfg_ctx->rxdma1_enable) - status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, - soc->rxdma_mon_buf_ring[ring_num] - .hal_srng, - RXDMA_MONITOR_BUF, - RX_MONITOR_BUFFER_SIZE, - &htt_tlv_filter); - else - status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, - pdev->rx_mac_buf_ring[ring_num] - .hal_srng, - RXDMA_BUF, RX_DATA_BUFFER_SIZE, - &htt_tlv_filter); - - return status; -} - -/* * dp_get_tx_pending() - read pending tx * @pdev_handle: Datapath PDEV handle * @@ -8122,207 +7580,6 @@ static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc, return QDF_STATUS_E_FAILURE; } -/** - * dp_vdev_set_monitor_mode_rings () - set monitor mode rings - * - * Allocate SW descriptor pool, buffers, link descriptor memory - * Initialize monitor related SRNGs - * - * @pdev: DP pdev object - * - * Return: QDF_STATUS - */ -QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev, - uint8_t delayed_replenish) -{ - struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; - uint32_t mac_id; - uint32_t mac_for_pdev; - struct dp_soc *soc = pdev->soc; - QDF_STATUS status = QDF_STATUS_SUCCESS; - struct dp_srng *mon_buf_ring; - uint32_t num_entries; - - pdev_cfg_ctx = pdev->wlan_cfg_ctx; - - /* If monitor rings are aleady initilized, return from here */ - if (pdev->pdev_mon_init) - return QDF_STATUS_SUCCESS; - - for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, - pdev->pdev_id); - - /* Allocate sw rx descriptor pool for mon RxDMA buffer ring */ - status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev); - if 
(!QDF_IS_STATUS_SUCCESS(status)) { - dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n", - __func__); - goto fail0; - } - - dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev); - - /* If monitor buffers are already allocated, - * do not allocate. - */ - status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, - delayed_replenish); - - mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev]; - /* - * Configure low interrupt threshld when monitor mode is - * configured. - */ - if (mon_buf_ring->hal_srng) { - num_entries = mon_buf_ring->num_entries; - hal_set_low_threshold(mon_buf_ring->hal_srng, - num_entries >> 3); - htt_srng_setup(pdev->soc->htt_handle, - pdev->pdev_id, - mon_buf_ring->hal_srng, - RXDMA_MONITOR_BUF); - } - - /* Allocate link descriptors for the mon link descriptor ring */ - status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev); - if (!QDF_IS_STATUS_SUCCESS(status)) { - dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed", - __func__); - goto fail0; - } - dp_link_desc_ring_replenish(soc, mac_for_pdev); - - htt_srng_setup(soc->htt_handle, pdev->pdev_id, - soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng, - RXDMA_MONITOR_DESC); - htt_srng_setup(soc->htt_handle, pdev->pdev_id, - soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng, - RXDMA_MONITOR_DST); - } - pdev->pdev_mon_init = 1; - - return QDF_STATUS_SUCCESS; - -fail0: - return QDF_STATUS_E_FAILURE; -} - -/** - * dp_vdev_set_monitor_mode_buf_rings () - set monitor mode buf rings - * - * Allocate SW descriptor pool, buffers, link descriptor memory - * Initialize monitor related SRNGs - * - * @pdev: DP pdev object - * - * Return: void - */ -static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev) -{ - uint32_t mac_id; - uint32_t mac_for_pdev; - struct dp_srng *mon_buf_ring; - uint32_t num_entries; - struct dp_soc *soc = pdev->soc; - - /* If delay monitor replenish is disabled, allocate link descriptor - * monitor ring buffers of ring size. - */ - if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) { - dp_vdev_set_monitor_mode_rings(pdev, false); - } else { - for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - mac_for_pdev = - dp_get_lmac_id_for_pdev_id(pdev->soc, - mac_id, - pdev->pdev_id); - - dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, - FALSE); - mon_buf_ring = - &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev]; - /* - * Configure low interrupt threshld when monitor mode is - * configured. 
- */ - if (mon_buf_ring->hal_srng) { - num_entries = mon_buf_ring->num_entries; - hal_set_low_threshold(mon_buf_ring->hal_srng, - num_entries >> 3); - htt_srng_setup(pdev->soc->htt_handle, - pdev->pdev_id, - mon_buf_ring->hal_srng, - RXDMA_MONITOR_BUF); - } - } - } -} - -/** - * dp_set_bsscolor() - sets bsscolor for tx capture - * @pdev: Datapath PDEV handle - * @bsscolor: new bsscolor - */ -static void -dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor) -{ - pdev->rx_mon_recv_status.bsscolor = bsscolor; -} - -/** - * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter - * @soc : data path soc handle - * @pdev_id : pdev_id - * Return: true on ucast filter flag set - */ -static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle) -{ - struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; - - if ((pdev->fp_data_filter & FILTER_DATA_UCAST) || - (pdev->mo_data_filter & FILTER_DATA_UCAST)) - return true; - - return false; -} - -/** - * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter - * @pdev_handle: Datapath PDEV handle - * Return: true on mcast filter flag set - */ -static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle) -{ - struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; - - if ((pdev->fp_data_filter & FILTER_DATA_MCAST) || - (pdev->mo_data_filter & FILTER_DATA_MCAST)) - return true; - - return false; -} - -/** - * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter - * @pdev_handle: Datapath PDEV handle - * Return: true on non data filter flag set - */ -static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle) -{ - struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle; - - if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) || - (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) { - if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) || - (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) { - return true; - } - } - - return false; -} - #ifdef MESH_MODE_SUPPORT static void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val) @@ -8986,7 +8243,7 @@ dp_print_host_stats(struct dp_vdev *vdev, dp_print_ring_stats(pdev); break; case TXRX_RX_MON_STATS: - dp_print_pdev_rx_mon_stats(pdev); + monitor_print_pdev_rx_mon_stats(pdev); break; case TXRX_REO_QUEUE_STATS: dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc, @@ -9023,72 +8280,6 @@ dp_print_host_stats(struct dp_vdev *vdev, return 0; } -/* - * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer - * modes are enabled or not. - * @dp_pdev: dp pdev handle. - * - * Return: bool - */ -static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev) -{ - if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && - !pdev->mcopy_mode) - return true; - else - return false; -} - -/* - *dp_set_bpr_enable() - API to enable/disable bpr feature - *@pdev_handle: DP_PDEV handle. - *@val: Provided value. - * - *Return: 0 for success. nonzero for failure. 
- */ -static QDF_STATUS -dp_set_bpr_enable(struct dp_pdev *pdev, int val) -{ - switch (val) { - case CDP_BPR_DISABLE: - pdev->bpr_enable = CDP_BPR_DISABLE; - if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en && - !pdev->tx_sniffer_enable && !pdev->mcopy_mode) { - dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); - } else if (pdev->enhanced_stats_en && - !pdev->tx_sniffer_enable && !pdev->mcopy_mode && - !pdev->pktlog_ppdu_stats) { - dp_h2t_cfg_stats_msg_send(pdev, - DP_PPDU_STATS_CFG_ENH_STATS, - pdev->pdev_id); - } - break; - case CDP_BPR_ENABLE: - pdev->bpr_enable = CDP_BPR_ENABLE; - if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && - !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) { - dp_h2t_cfg_stats_msg_send(pdev, - DP_PPDU_STATS_CFG_BPR, - pdev->pdev_id); - } else if (pdev->enhanced_stats_en && - !pdev->tx_sniffer_enable && !pdev->mcopy_mode && - !pdev->pktlog_ppdu_stats) { - dp_h2t_cfg_stats_msg_send(pdev, - DP_PPDU_STATS_CFG_BPR_ENH, - pdev->pdev_id); - } else if (pdev->pktlog_ppdu_stats) { - dp_h2t_cfg_stats_msg_send(pdev, - DP_PPDU_STATS_CFG_BPR_PKTLOG, - pdev->pdev_id); - } - break; - default: - break; - } - - return QDF_STATUS_SUCCESS; -} - /* * dp_pdev_tid_stats_ingress_inc * @pdev: pdev handle @@ -9115,92 +8306,6 @@ dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val) pdev->stats.tid_stats.osif_drop += val; } -#ifdef FEATURE_PERPKT_INFO -/* - * dp_enable_enhanced_stats()- API to enable enhanced statistcs - * @soc_handle: DP_SOC handle - * @pdev_id: id of DP_PDEV handle - * - * Return: QDF_STATUS - */ -static QDF_STATUS -dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) -{ - struct dp_pdev *pdev = NULL; - QDF_STATUS status = QDF_STATUS_SUCCESS; - - pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, - pdev_id); - - if (!pdev) - return QDF_STATUS_E_FAILURE; - - if (pdev->enhanced_stats_en == 0) - dp_cal_client_timer_start(pdev->cal_client_ctx); - - pdev->enhanced_stats_en = 1; - - dp_mon_filter_setup_enhanced_stats(pdev); - status = dp_mon_filter_update(pdev); - if (status != QDF_STATUS_SUCCESS) { - dp_cdp_err("%pK: Failed to set enhanced mode filters", soc); - dp_mon_filter_reset_enhanced_stats(pdev); - dp_cal_client_timer_stop(pdev->cal_client_ctx); - pdev->enhanced_stats_en = 0; - return QDF_STATUS_E_FAILURE; - } - - if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) { - dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id); - } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) { - dp_h2t_cfg_stats_msg_send(pdev, - DP_PPDU_STATS_CFG_BPR_ENH, - pdev->pdev_id); - } - - return QDF_STATUS_SUCCESS; -} - -/* - * dp_disable_enhanced_stats()- API to disable enhanced statistcs - * - * @param soc - the soc handle - * @param pdev_id - pdev_id of pdev - * @return - QDF_STATUS - */ -static QDF_STATUS -dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) -{ - struct dp_pdev *pdev = - dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, - pdev_id); - - if (!pdev) - return QDF_STATUS_E_FAILURE; - - if (pdev->enhanced_stats_en == 1) - dp_cal_client_timer_stop(pdev->cal_client_ctx); - - pdev->enhanced_stats_en = 0; - - if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) { - dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); - } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) { - dp_h2t_cfg_stats_msg_send(pdev, - DP_PPDU_STATS_CFG_BPR, - pdev->pdev_id); - } - - dp_mon_filter_reset_enhanced_stats(pdev); - if (dp_mon_filter_update(pdev) != 
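/*
 * Illustrative, self-contained sketch (not part of the applied patch) of the
 * kind of decision table encoded by the removed dp_set_bpr_enable(): which
 * PPDU stats configuration is requested from firmware depends on which of
 * the overlapping features (BPR, enhanced stats, pktlog PPDU stats) are
 * active at the same time. This is simplified and the mask values are
 * hypothetical, not the driver's DP_PPDU_STATS_CFG_* constants.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CFG_OFF_SKETCH		0x0u
#define CFG_ENH_STATS_SKETCH	0x1u
#define CFG_BPR_SKETCH		0x2u
#define CFG_BPR_ENH_SKETCH	0x3u
#define CFG_BPR_PKTLOG_SKETCH	0x7u

static uint32_t bpr_stats_cfg(bool bpr, bool enh_stats, bool pktlog_ppdu)
{
	if (!bpr)
		return enh_stats ? CFG_ENH_STATS_SKETCH : CFG_OFF_SKETCH;
	if (pktlog_ppdu)
		return CFG_BPR_PKTLOG_SKETCH;
	return enh_stats ? CFG_BPR_ENH_SKETCH : CFG_BPR_SKETCH;
}

int main(void)
{
	printf("cfg=0x%x\n", bpr_stats_cfg(true, true, false));
	return 0;
}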
QDF_STATUS_SUCCESS) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("Failed to reset enhanced mode filters")); - } - - return QDF_STATUS_SUCCESS; -} -#endif /* FEATURE_PERPKT_INFO */ - /* * dp_get_fw_peer_stats()- function to print peer stats * @soc: soc handle @@ -9344,22 +8449,6 @@ static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id, return QDF_STATUS_SUCCESS; } -#ifdef WLAN_ATF_ENABLE -static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value) -{ - if (!pdev) { - dp_cdp_err("Invalid pdev"); - return; - } - - pdev->dp_atf_stats_enable = value; -} -#else -static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value) -{ -} -#endif - /* * dp_set_peer_param: function to set parameters in peer * @cdp_soc: DP soc handle @@ -9433,15 +8522,15 @@ static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, break; case CDP_FILTER_MCAST_DATA: val->cdp_pdev_param_fltr_mcast = - dp_pdev_get_filter_mcast_data(pdev); + monitor_pdev_get_filter_mcast_data(pdev); break; case CDP_FILTER_NO_DATA: val->cdp_pdev_param_fltr_none = - dp_pdev_get_filter_non_data(pdev); + monitor_pdev_get_filter_non_data(pdev); break; case CDP_FILTER_UCAST_DATA: val->cdp_pdev_param_fltr_ucast = - dp_pdev_get_filter_ucast_data(pdev); + monitor_pdev_get_filter_ucast_data(pdev); break; default: return QDF_STATUS_E_FAILURE; @@ -9493,7 +8582,8 @@ static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, return monitor_config_debug_sniffer(pdev, val.cdp_pdev_param_dbg_snf); case CDP_CONFIG_BPR_ENABLE: - return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable); + return monitor_set_bpr_enable(pdev, + val.cdp_pdev_param_bpr_enable); case CDP_CONFIG_PRIMARY_RADIO: pdev->is_primary = val.cdp_pdev_param_primary_radio; break; @@ -9509,10 +8599,10 @@ static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, val.cdp_pdev_param_osif_drop); break; case CDP_CONFIG_ENH_RX_CAPTURE: - return dp_config_enh_rx_capture(pdev, + return monitor_config_enh_rx_capture(pdev, val.cdp_pdev_param_en_rx_cap); case CDP_CONFIG_ENH_TX_CAPTURE: - return dp_config_enh_tx_capture(pdev, + return monitor_config_enh_tx_capture(pdev, val.cdp_pdev_param_en_tx_cap); case CDP_CONFIG_HMMC_TID_OVERRIDE: pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd; @@ -9528,8 +8618,8 @@ static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, val.cdp_pdev_param_tidmap_prty); break; case CDP_FILTER_NEIGH_PEERS: - dp_set_filter_neigh_peers(pdev, - val.cdp_pdev_param_fltr_neigh_peers); + monitor_set_filter_neigh_peers(pdev, + val.cdp_pdev_param_fltr_neigh_peers); break; case CDP_MONITOR_CHANNEL: pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan; @@ -9540,14 +8630,14 @@ static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, wlan_reg_freq_to_band(pdev->mon_chan_freq); break; case CDP_CONFIG_BSS_COLOR: - dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color); + monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color); break; case CDP_SET_ATF_STATS_ENABLE: - dp_set_atf_stats_enable(pdev, + monitor_set_atf_stats_enable(pdev, val.cdp_pdev_param_atf_stats_enable); break; case CDP_CONFIG_SPECIAL_VAP: - dp_vdev_set_monitor_mode_buf_rings(pdev); + monitor_vdev_set_monitor_mode_buf_rings(pdev); break; default: return QDF_STATUS_E_INVAL; @@ -9911,51 +9001,6 @@ static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc, return QDF_STATUS_SUCCESS; } -/** - * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture 
flags for a peer - * @soc: DP_SOC handle - * @pdev_id: id of DP_PDEV handle - * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode - * @is_tx_pkt_cap_enable: enable/disable/delete/print - * Tx packet capture in monitor mode - * @peer_mac: MAC address for which the above need to be enabled/disabled - * - * Return: Success if Rx & Tx capture is enabled for peer, false otherwise - */ -QDF_STATUS -dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, - uint8_t pdev_id, - bool is_rx_pkt_cap_enable, - uint8_t is_tx_pkt_cap_enable, - uint8_t *peer_mac) -{ - struct dp_peer *peer; - QDF_STATUS status; - struct dp_pdev *pdev = - dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, - pdev_id); - if (!pdev) - return QDF_STATUS_E_FAILURE; - - peer = dp_peer_find_hash_find((struct dp_soc *)soc, - peer_mac, 0, DP_VDEV_ALL, - DP_MOD_ID_CDP); - if (!peer) - return QDF_STATUS_E_FAILURE; - - /* we need to set tx pkt capture for non associated peer */ - status = dp_peer_set_tx_capture_enabled(pdev, peer, - is_tx_pkt_cap_enable, - peer_mac); - - status = dp_peer_set_rx_capture_enabled(pdev, peer, - is_rx_pkt_cap_enable, - peer_mac); - dp_peer_unref_delete(peer, DP_MOD_ID_CDP); - - return status; -} - /* * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev * @soc: DP_SOC handle @@ -11188,127 +10233,6 @@ dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id, } #endif -#ifdef ATH_SUPPORT_NAC_RSSI -/** - * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC - * @soc_hdl: DP soc handle - * @vdev_id: id of DP vdev handle - * @mac_addr: neighbour mac - * @rssi: rssi value - * - * Return: 0 for success. nonzero for failure. - */ -static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl, - uint8_t vdev_id, - char *mac_addr, - uint8_t *rssi) -{ - struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); - struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, - DP_MOD_ID_CDP); - struct dp_pdev *pdev; - struct dp_neighbour_peer *peer = NULL; - QDF_STATUS status = QDF_STATUS_E_FAILURE; - - if (!vdev) - return status; - - pdev = vdev->pdev; - *rssi = 0; - qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); - TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, - neighbour_peer_list_elem) { - if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], - mac_addr, QDF_MAC_ADDR_SIZE) == 0) { - *rssi = peer->rssi; - status = QDF_STATUS_SUCCESS; - break; - } - } - qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); - dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); - return status; -} - -static QDF_STATUS -dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc, - uint8_t vdev_id, - enum cdp_nac_param_cmd cmd, char *bssid, - char *client_macaddr, - uint8_t chan_num) -{ - struct dp_soc *soc = (struct dp_soc *)cdp_soc; - struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, - DP_MOD_ID_CDP); - struct dp_pdev *pdev; - - if (!vdev) - return QDF_STATUS_E_FAILURE; - - pdev = (struct dp_pdev *)vdev->pdev; - pdev->nac_rssi_filtering = 1; - /* Store address of NAC (neighbour peer) which will be checked - * against TA of received packets. 
- */ - - if (cmd == CDP_NAC_PARAM_ADD) { - dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, - DP_NAC_PARAM_ADD, - (uint8_t *)client_macaddr); - } else if (cmd == CDP_NAC_PARAM_DEL) { - dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, - DP_NAC_PARAM_DEL, - (uint8_t *)client_macaddr); - } - - if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi) - soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi - (soc->ctrl_psoc, pdev->pdev_id, - vdev->vdev_id, cmd, bssid, client_macaddr); - - dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); - return QDF_STATUS_SUCCESS; -} -#endif - -/** - * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering - * for pktlog - * @soc: cdp_soc handle - * @pdev_id: id of dp pdev handle - * @mac_addr: Peer mac address - * @enb_dsb: Enable or disable peer based filtering - * - * Return: QDF_STATUS - */ -static int -dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id, - uint8_t *mac_addr, uint8_t enb_dsb) -{ - struct dp_peer *peer; - struct dp_pdev *pdev = - dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, - pdev_id); - - if (!pdev) - return QDF_STATUS_E_FAILURE; - - peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr, - 0, DP_VDEV_ALL, DP_MOD_ID_CDP); - - if (!peer) { - dp_err("Invalid Peer"); - return QDF_STATUS_E_FAILURE; - } - - peer->peer_based_pktlog_filter = enb_dsb; - pdev->dp_peer_based_pktlog = enb_dsb; - - dp_peer_unref_delete(peer, DP_MOD_ID_CDP); - - return QDF_STATUS_SUCCESS; -} - #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS /** * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for @@ -11800,7 +10724,6 @@ static struct cdp_cmn_ops dp_ops_cmn = { #endif .txrx_pdev_init = dp_pdev_init_wifi3, .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3, - .txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3, .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3, .txrx_ath_getstats = dp_get_device_stats, .addba_requestprocess = dp_addba_requestprocess_wifi3, @@ -11883,10 +10806,6 @@ static struct cdp_ctrl_ops dp_ops_ctrl = { .txrx_get_psoc_param = dp_get_psoc_param, .txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest, .txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest, -#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) - .txrx_update_filter_neighbour_peers = - dp_update_filter_neighbour_peers, -#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ .txrx_get_sec_type = dp_get_sec_type, .txrx_wdi_event_sub = dp_wdi_event_sub, .txrx_wdi_event_unsub = dp_wdi_event_unsub, @@ -11900,10 +10819,6 @@ static struct cdp_ctrl_ops dp_ops_ctrl = { #ifdef VDEV_PEER_PROTOCOL_COUNT .txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt, #endif -#ifdef ATH_SUPPORT_NAC_RSSI - .txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi, - .txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi, -#endif #ifdef WLAN_SUPPORT_MSCS .txrx_record_mscs_params = dp_record_mscs_params, #endif @@ -11913,7 +10828,6 @@ static struct cdp_ctrl_ops dp_ops_ctrl = { #endif .set_key = dp_set_michael_key, .txrx_get_vdev_param = dp_get_vdev_param, - .enable_peer_based_pktlog = dp_enable_peer_based_pktlog, .calculate_delay_stats = dp_calculate_delay_stats, #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG .txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag, @@ -11929,10 +10843,6 @@ static struct cdp_ctrl_ops dp_ops_ctrl = { #ifdef QCA_MULTIPASS_SUPPORT .txrx_peer_set_vlan_id = dp_peer_set_vlan_id, #endif /*QCA_MULTIPASS_SUPPORT*/ -#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) 
- .txrx_update_peer_pkt_capture_params = - dp_peer_update_pkt_capture_params, -#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY .txrx_set_delta_tsf = dp_set_delta_tsf, .txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report, @@ -11954,10 +10864,6 @@ static struct cdp_host_stats_ops dp_ops_host_stats = { .txrx_per_peer_stats = dp_get_host_peer_stats, .get_fw_peer_stats = dp_get_fw_peer_stats, .get_htt_stats = dp_get_htt_stats, -#ifdef FEATURE_PERPKT_INFO - .txrx_enable_enhanced_stats = dp_enable_enhanced_stats, - .txrx_disable_enhanced_stats = dp_disable_enhanced_stats, -#endif /* FEATURE_PERPKT_INFO */ .txrx_stats_publish = dp_txrx_stats_publish, .txrx_get_vdev_stats = dp_txrx_get_vdev_stats, .txrx_get_peer_stats = dp_txrx_get_peer_stats, @@ -11982,12 +10888,12 @@ static struct cdp_pflow_ops dp_ops_pflow = { #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) static struct cdp_cfr_ops dp_ops_cfr = { - .txrx_cfr_filter = dp_cfr_filter, + .txrx_cfr_filter = NULL, .txrx_get_cfr_rcc = dp_get_cfr_rcc, .txrx_set_cfr_rcc = dp_set_cfr_rcc, .txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats, .txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats, - .txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer, + .txrx_enable_mon_reap_timer = NULL, }; #endif @@ -12449,8 +11355,6 @@ static struct cdp_misc_ops dp_ops_misc = { .runtime_suspend = dp_runtime_suspend, .runtime_resume = dp_runtime_resume, #endif /* FEATURE_RUNTIME_PM */ - .pkt_log_init = dp_pkt_log_init, - .pkt_log_con_service = dp_pkt_log_con_service, .get_num_rx_contexts = dp_get_num_rx_contexts, .get_tx_ack_stats = dp_tx_get_success_ack_stats, #ifdef WLAN_SUPPORT_DATA_STALL @@ -12545,7 +11449,7 @@ static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) dp_is_enable_reap_timer_non_pkt(pdev)) && soc->reap_timer_init) { qdf_timer_sync_cancel(&soc->mon_reap_timer); - dp_service_mon_rings(soc, DP_MON_REAP_BUDGET); + monitor_service_mon_rings(soc, DP_MON_REAP_BUDGET); } dp_suspend_fse_cache_flush(soc); @@ -12603,7 +11507,7 @@ static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) || dp_is_enable_reap_timer_non_pkt(pdev)) && soc->reap_timer_init) { - dp_service_mon_rings(soc, DP_MON_REAP_BUDGET); + monitor_service_mon_rings(soc, DP_MON_REAP_BUDGET); } } @@ -12630,7 +11534,7 @@ static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl, dp_is_enable_reap_timer_non_pkt(pdev)) && soc->reap_timer_init) { qdf_timer_sync_cancel(&soc->mon_reap_timer); - dp_service_mon_rings(soc, DP_MON_REAP_BUDGET); + monitor_service_mon_rings(soc, DP_MON_REAP_BUDGET); } } @@ -13191,82 +12095,6 @@ void dp_is_hw_dbs_enable(struct dp_soc *soc, *max_mac_rings = (dbs_enable)?(*max_mac_rings):1; } -#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) -/* - * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR - * @soc_hdl: Datapath soc handle - * @pdev_id: id of data path pdev handle - * @enable: Enable/Disable CFR - * @filter_val: Flag to select Filter for monitor mode - */ -static void dp_cfr_filter(struct cdp_soc_t *soc_hdl, - uint8_t pdev_id, - bool enable, - struct cdp_monitor_filter *filter_val) -{ - struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); - struct dp_pdev *pdev = NULL; - struct htt_rx_ring_tlv_filter htt_tlv_filter = {0}; - int max_mac_rings; - uint8_t mac_id = 0; - - pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); - if (!pdev) { - dp_err("pdev is NULL"); - return; - } - - 
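	/*
	 * When a monitor vdev already exists, monitor mode owns the RXDMA
	 * status ring filter programming, so the CFR-specific configuration
	 * below is skipped.  Otherwise the cdp_monitor_filter mode bits are
	 * mapped onto the HTT TLV filter: MON_FILTER_PASS drives enable_fp
	 * (filter-pass path) and MON_FILTER_OTHER drives enable_mo
	 * (monitor-other path), while the monitor-direct path (enable_md)
	 * stays disabled.
	 */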
if (pdev->monitor_vdev) { - dp_info("No action is needed since monitor mode is enabled\n"); - return; - } - soc = pdev->soc; - pdev->cfr_rcc_mode = false; - max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); - dp_is_hw_dbs_enable(soc, &max_mac_rings); - - dp_debug("Max_mac_rings %d", max_mac_rings); - dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode); - - if (enable) { - pdev->cfr_rcc_mode = true; - - htt_tlv_filter.ppdu_start = 1; - htt_tlv_filter.ppdu_end = 1; - htt_tlv_filter.ppdu_end_user_stats = 1; - htt_tlv_filter.ppdu_end_user_stats_ext = 1; - htt_tlv_filter.ppdu_end_status_done = 1; - htt_tlv_filter.mpdu_start = 1; - htt_tlv_filter.offset_valid = false; - - htt_tlv_filter.enable_fp = - (filter_val->mode & MON_FILTER_PASS) ? 1 : 0; - htt_tlv_filter.enable_md = 0; - htt_tlv_filter.enable_mo = - (filter_val->mode & MON_FILTER_OTHER) ? 1 : 0; - htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt; - htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl; - htt_tlv_filter.fp_data_filter = filter_val->fp_data; - htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt; - htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl; - htt_tlv_filter.mo_data_filter = filter_val->mo_data; - } - - for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { - int mac_for_pdev = - dp_get_mac_id_for_pdev(mac_id, - pdev->pdev_id); - - htt_h2t_rx_ring_cfg(soc->htt_handle, - mac_for_pdev, - soc->rxdma_mon_status_ring[mac_id] - .hal_srng, - RXDMA_MONITOR_STATUS, - RX_MON_STATUS_BUF_SIZE, - &htt_tlv_filter); - } -} - /** * dp_get_cfr_rcc() - get cfr rcc config * @soc_hdl: Datapath soc handle @@ -13357,267 +12185,6 @@ static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc)); } -/* - * dp_enable_mon_reap_timer() - enable/disable reap timer - * @soc_hdl: Datapath soc handle - * @pdev_id: id of objmgr pdev - * @enable: Enable/Disable reap timer of monitor status ring - * - * Return: none - */ -static void -dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, - bool enable) -{ - struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); - struct dp_pdev *pdev = NULL; - - pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); - if (!pdev) { - dp_err("pdev is NULL"); - return; - } - - pdev->enable_reap_timer_non_pkt = enable; - if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { - dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode); - return; - } - - if (!soc->reap_timer_init) { - dp_err("reap timer not init"); - return; - } - - if (enable) - qdf_timer_mod(&soc->mon_reap_timer, - DP_INTR_POLL_TIMER_MS); - else - qdf_timer_sync_cancel(&soc->mon_reap_timer); -} -#endif - -/* - * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is - * enabled by non-pkt log or not - * @pdev: point to dp pdev - * - * Return: true if mon reap timer is enabled by non-pkt log - */ -static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) -{ - if (!pdev) { - dp_err("null pdev"); - return false; - } - - return pdev->enable_reap_timer_non_pkt; -} - -/* -* dp_set_pktlog_wifi3() - attach txrx vdev -* @pdev: Datapath PDEV handle -* @event: which event's notifications are being subscribed to -* @enable: WDI event subscribe or not. 
(True or False) -* -* Return: Success, NULL on failure -*/ -#ifdef WDI_EVENT_ENABLE -int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, - bool enable) -{ - struct dp_soc *soc = NULL; - int max_mac_rings = wlan_cfg_get_num_mac_rings - (pdev->wlan_cfg_ctx); - uint8_t mac_id = 0; - - soc = pdev->soc; - dp_is_hw_dbs_enable(soc, &max_mac_rings); - - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, - FL("Max_mac_rings %d "), - max_mac_rings); - - if (enable) { - switch (event) { - case WDI_EVENT_RX_DESC: - if (pdev->monitor_vdev) { - /* Nothing needs to be done if monitor mode is - * enabled - */ - pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; - return 0; - } - - if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) { - pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; - dp_mon_filter_setup_rx_pkt_log_full(pdev); - if (dp_mon_filter_update(pdev) != - QDF_STATUS_SUCCESS) { - dp_cdp_err("%pK: Pktlog full filters set failed", soc); - dp_mon_filter_reset_rx_pkt_log_full(pdev); - pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; - return 0; - } - - if (soc->reap_timer_init && - (!dp_is_enable_reap_timer_non_pkt(pdev))) - qdf_timer_mod(&soc->mon_reap_timer, - DP_INTR_POLL_TIMER_MS); - } - break; - - case WDI_EVENT_LITE_RX: - if (pdev->monitor_vdev) { - /* Nothing needs to be done if monitor mode is - * enabled - */ - pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; - return 0; - } - if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) { - pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; - - /* - * Set the packet log lite mode filter. - */ - dp_mon_filter_setup_rx_pkt_log_lite(pdev); - if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { - dp_cdp_err("%pK: Pktlog lite filters set failed", soc); - dp_mon_filter_reset_rx_pkt_log_lite(pdev); - pdev->rx_pktlog_mode = - DP_RX_PKTLOG_DISABLED; - return 0; - } - - if (soc->reap_timer_init && - (!dp_is_enable_reap_timer_non_pkt(pdev))) - qdf_timer_mod(&soc->mon_reap_timer, - DP_INTR_POLL_TIMER_MS); - } - break; - - case WDI_EVENT_LITE_T2H: - for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { - int mac_for_pdev = dp_get_mac_id_for_pdev( - mac_id, pdev->pdev_id); - - pdev->pktlog_ppdu_stats = true; - dp_h2t_cfg_stats_msg_send(pdev, - DP_PPDU_TXLITE_STATS_BITMASK_CFG, - mac_for_pdev); - } - break; - - case WDI_EVENT_RX_CBF: - if (pdev->monitor_vdev) { - /* Nothing needs to be done if monitor mode is - * enabled - */ - dp_info("Monitor mode, CBF setting filters"); - pdev->rx_pktlog_cbf = true; - return 0; - } - if (!pdev->rx_pktlog_cbf) { - pdev->rx_pktlog_cbf = true; - pdev->monitor_configured = true; - dp_vdev_set_monitor_mode_buf_rings(pdev); - /* - * Set the packet log lite mode filter. 
- */ - qdf_info("Non monitor mode: Enable destination ring"); - - dp_mon_filter_setup_rx_pkt_log_cbf(pdev); - if (dp_mon_filter_update(pdev) != - QDF_STATUS_SUCCESS) { - dp_err("Pktlog set CBF filters failed"); - dp_mon_filter_reset_rx_pktlog_cbf(pdev); - pdev->rx_pktlog_mode = - DP_RX_PKTLOG_DISABLED; - pdev->monitor_configured = false; - return 0; - } - - if (soc->reap_timer_init && - !dp_is_enable_reap_timer_non_pkt(pdev)) - qdf_timer_mod(&soc->mon_reap_timer, - DP_INTR_POLL_TIMER_MS); - } - break; - - default: - /* Nothing needs to be done for other pktlog types */ - break; - } - } else { - switch (event) { - case WDI_EVENT_RX_DESC: - case WDI_EVENT_LITE_RX: - if (pdev->monitor_vdev) { - /* Nothing needs to be done if monitor mode is - * enabled - */ - pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; - return 0; - } - if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { - pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; - dp_mon_filter_reset_rx_pkt_log_full(pdev); - if (dp_mon_filter_update(pdev) != - QDF_STATUS_SUCCESS) { - dp_cdp_err("%pK: Pktlog filters reset failed", soc); - return 0; - } - - dp_mon_filter_reset_rx_pkt_log_lite(pdev); - if (dp_mon_filter_update(pdev) != - QDF_STATUS_SUCCESS) { - dp_cdp_err("%pK: Pktlog filters reset failed", soc); - return 0; - } - - if (soc->reap_timer_init && - (!dp_is_enable_reap_timer_non_pkt(pdev))) - qdf_timer_stop(&soc->mon_reap_timer); - } - break; - case WDI_EVENT_LITE_T2H: - /* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW - * passing value 0. Once these macros will define in htt - * header file will use proper macros - */ - for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { - int mac_for_pdev = - dp_get_mac_id_for_pdev(mac_id, - pdev->pdev_id); - - pdev->pktlog_ppdu_stats = false; - if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) { - dp_h2t_cfg_stats_msg_send(pdev, 0, - mac_for_pdev); - } else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { - dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER, - mac_for_pdev); - } else if (pdev->enhanced_stats_en) { - dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, - mac_for_pdev); - } - } - - break; - case WDI_EVENT_RX_CBF: - pdev->rx_pktlog_cbf = false; - break; - - default: - /* Nothing needs to be done for other pktlog types */ - break; - } - } - return 0; -} -#endif - /** * dp_bucket_index() - Return index from array * diff --git a/dp/wifi3.0/dp_peer.c b/dp/wifi3.0/dp_peer.c index 0db556c920..39d62a1a37 100644 --- a/dp/wifi3.0/dp_peer.c +++ b/dp/wifi3.0/dp_peer.c @@ -32,6 +32,9 @@ #ifdef FEATURE_WDS #include "dp_txrx_wds.h" #endif +#ifdef WIFI_MONITOR_SUPPORT +#include +#endif #ifdef WLAN_TX_PKT_CAPTURE_ENH #include "dp_tx_capture.h" @@ -2271,7 +2274,8 @@ static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc, dp_peer_find_id_to_obj_add(soc, peer, peer_id); if (peer->peer_id == HTT_INVALID_PEER) { peer->peer_id = peer_id; - dp_peer_tid_peer_id_update(peer, peer->peer_id); + monitor_peer_tid_peer_id_update(soc, peer, + peer->peer_id); } else { QDF_ASSERT(0); } @@ -3219,30 +3223,6 @@ static void dp_peer_setup_remaining_tids(struct dp_peer *peer) static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}; #endif -/* - * dp_peer_tx_init() – Initialize receive TID state - * @pdev: Datapath pdev - * @peer: Datapath peer - * - */ -void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer) -{ - dp_peer_tid_queue_init(peer); - dp_peer_update_80211_hdr(peer->vdev, peer); -} - -/* - * dp_peer_tx_cleanup() – Deinitialize receive TID 
state - * @vdev: Datapath vdev - * @peer: Datapath peer - * - */ -static inline void -dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) -{ - dp_peer_tid_queue_cleanup(peer); -} - /* * dp_peer_rx_init() – Initialize receive TID state * @pdev: Datapath pdev @@ -3373,7 +3353,7 @@ void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) /* save vdev related member in case vdev freed */ vdev_opmode = vdev->opmode; - dp_peer_tx_cleanup(vdev, peer); + monitor_peer_tx_cleanup(vdev, peer); if (vdev_opmode != wlan_op_mode_monitor) /* cleanup the Rx reorder queues for this peer */ diff --git a/dp/wifi3.0/dp_peer.h b/dp/wifi3.0/dp_peer.h index 440f9fb63c..b245fe7f07 100644 --- a/dp/wifi3.0/dp_peer.h +++ b/dp/wifi3.0/dp_peer.h @@ -798,25 +798,6 @@ void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl, struct dp_ast_flow_override_info *ast_info); #endif -/** - * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer - * @soc: DP SOC handle - * @pdev_id: id of DP pdev handle - * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode - * @is_tx_pkt_cap_enable: enable/disable/delete/print - * Tx packet capture in monitor mode - * Tx packet capture in monitor mode - * @peer_mac: MAC address for which the above need to be enabled/disabled - * - * Return: Success if Rx & Tx capture is enabled for peer, false otherwise - */ -QDF_STATUS -dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, - uint8_t pdev_id, - bool is_rx_pkt_cap_enable, - uint8_t is_tx_pkt_cap_enable, - uint8_t *peer_mac); - /* * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache * after deleting the entries (ie., setting valid=0) @@ -829,48 +810,6 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status); -#ifndef WLAN_TX_PKT_CAPTURE_ENH -/** - * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID - * @peer: Datapath peer - * - */ -static inline void dp_peer_tid_queue_init(struct dp_peer *peer) -{ -} - -/** - * dp_peer_tid_peer_id_update() – update peer_id to tid structure - * @peer: Datapath peer - * @peer_id: peer_id - * - */ -static inline -void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id) -{ -} - -/** - * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID - * @peer: Datapath peer - * - */ -static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer) -{ -} - -/** - * dp_peer_update_80211_hdr() – dp peer update 80211 hdr - * @vdev: Datapath vdev - * @peer: Datapath peer - * - */ -static inline void -dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer) -{ -} -#endif - #ifdef QCA_PEER_EXT_STATS QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc, struct dp_peer *peer); diff --git a/dp/wifi3.0/dp_rx.c b/dp/wifi3.0/dp_rx.c index cebcf5dd97..e664af3466 100644 --- a/dp/wifi3.0/dp_rx.c +++ b/dp/wifi3.0/dp_rx.c @@ -35,6 +35,10 @@ #endif #include "dp_hist.h" #include "dp_rx_buffer_pool.h" +#ifdef WIFI_MONITOR_SUPPORT +#include "dp_htt.h" +#include +#endif #ifndef QCA_HOST_MODE_WIFI_DISABLED @@ -860,50 +864,6 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, #endif #ifdef FEATURE_NAC_RSSI -/** - * dp_rx_nac_filter(): Function to perform filtering of non-associated - * clients - * @pdev: DP pdev handle - * @rx_pkt_hdr: Rx packet Header - * - * return: dp_vdev* - */ -static -struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev, - uint8_t *rx_pkt_hdr) -{ - struct ieee80211_frame *wh; - struct dp_neighbour_peer *peer = NULL; - - wh = 
(struct ieee80211_frame *)rx_pkt_hdr; - - if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS) - return NULL; - - qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); - TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, - neighbour_peer_list_elem) { - if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], - wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) { - dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x", - pdev->soc, - peer->neighbour_peers_macaddr.raw[0], - peer->neighbour_peers_macaddr.raw[1], - peer->neighbour_peers_macaddr.raw[2], - peer->neighbour_peers_macaddr.raw[3], - peer->neighbour_peers_macaddr.raw[4], - peer->neighbour_peers_macaddr.raw[5]); - - qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); - - return pdev->monitor_vdev; - } - } - qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); - - return NULL; -} - /** * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac * @soc: DP SOC handle @@ -948,23 +908,11 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, goto free; } - if (pdev->filter_neighbour_peers) { - /* Next Hop scenario not yet handle */ - vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr); - if (vdev) { - dp_rx_mon_deliver(soc, pdev->pdev_id, - pdev->invalid_peer_head_msdu, - pdev->invalid_peer_tail_msdu); - - pdev->invalid_peer_head_msdu = NULL; - pdev->invalid_peer_tail_msdu = NULL; - - return 0; - } - } + if (monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) == + QDF_STATUS_SUCCESS) + return 0; TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { - if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE) == 0) { goto out; @@ -990,13 +938,9 @@ out: * in order to avoid HM_WDS false addition. */ if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) { - if (!soc->hw_nac_monitor_support && - pdev->filter_neighbour_peers && - vdev->opmode == wlan_op_mode_sta) { - dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm", - soc, wh->i_addr1); + if (monitor_drop_inv_peer_pkts(vdev, wh) == QDF_STATUS_SUCCESS) goto free; - } + pdev->soc->cdp_soc.ol_ops->rx_invalid_peer( (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc, pdev->pdev_id, &msg); diff --git a/dp/wifi3.0/dp_rx_err.c b/dp/wifi3.0/dp_rx_err.c index 4cc405036c..2c9d86463d 100644 --- a/dp/wifi3.0/dp_rx_err.c +++ b/dp/wifi3.0/dp_rx_err.c @@ -27,6 +27,10 @@ #include "qdf_nbuf.h" #include "dp_rx_defrag.h" #include "dp_ipa.h" +#ifdef WIFI_MONITOR_SUPPORT +#include "dp_htt.h" +#include +#endif #ifdef FEATURE_WDS #include "dp_txrx_wds.h" #endif diff --git a/dp/wifi3.0/dp_stats.c b/dp/wifi3.0/dp_stats.c index a28c78cd84..2ed8916640 100644 --- a/dp/wifi3.0/dp_stats.c +++ b/dp/wifi3.0/dp_stats.c @@ -27,9 +27,12 @@ #include #include "dp_hist.h" #endif +#ifdef WIFI_MONITOR_SUPPORT +#include "dp_htt.h" +#include +#endif #define DP_MAX_STRING_LEN 500 -#define INVALID_FREE_BUFF 0xffffffff #define DP_HTT_HW_INTR_NAME_LEN HTT_STATS_MAX_HW_INTR_NAME_LEN #define DP_HTT_HW_MODULE_NAME_LEN HTT_STATS_MAX_HW_MODULE_NAME_LEN @@ -4347,42 +4350,6 @@ void dp_peer_stats_update_protocol_cnt(struct cdp_soc_t *soc_hdl, #endif #ifdef WDI_EVENT_ENABLE -QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer) -{ - struct cdp_interface_peer_stats peer_stats_intf; - struct cdp_peer_stats *peer_stats = &peer->stats; - - if (!peer->vdev) - return QDF_STATUS_E_FAULT; - - qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf)); - if (peer_stats->rx.last_snr != peer_stats->rx.snr) - peer_stats_intf.rssi_changed = true; - - if ((peer_stats->rx.snr && peer_stats_intf.rssi_changed) 
|| - (peer_stats->tx.tx_rate && - peer_stats->tx.tx_rate != peer_stats->tx.last_tx_rate)) { - qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw, - QDF_MAC_ADDR_SIZE); - peer_stats_intf.vdev_id = peer->vdev->vdev_id; - peer_stats_intf.last_peer_tx_rate = peer_stats->tx.last_tx_rate; - peer_stats_intf.peer_tx_rate = peer_stats->tx.tx_rate; - peer_stats_intf.peer_rssi = peer_stats->rx.snr; - peer_stats_intf.tx_packet_count = peer_stats->tx.ucast.num; - peer_stats_intf.rx_packet_count = peer_stats->rx.to_stack.num; - peer_stats_intf.tx_byte_count = peer_stats->tx.tx_success.bytes; - peer_stats_intf.rx_byte_count = peer_stats->rx.to_stack.bytes; - peer_stats_intf.per = peer_stats->tx.last_per; - peer_stats_intf.ack_rssi = peer_stats->tx.last_ack_rssi; - peer_stats_intf.free_buff = INVALID_FREE_BUFF; - dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc, - (void *)&peer_stats_intf, 0, - WDI_NO_VAL, dp_pdev->pdev_id); - } - - return QDF_STATUS_SUCCESS; -} - QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev, struct cdp_rx_stats_ppdu_user *ppdu_user) { @@ -6305,7 +6272,7 @@ dp_print_pdev_tx_stats(struct dp_pdev *pdev) i, pdev->stats.wdi_event[i]); } - dp_print_pdev_tx_capture_stats(pdev); + monitor_print_pdev_tx_capture_stats(pdev); } void @@ -6382,88 +6349,6 @@ dp_print_pdev_rx_stats(struct dp_pdev *pdev) pdev->stats.rx_buffer_pool.num_pool_bufs_replenish); } -void -dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev) -{ - struct cdp_pdev_mon_stats *rx_mon_stats; - uint32_t *stat_ring_ppdu_ids; - uint32_t *dest_ring_ppdu_ids; - int i, idx; - - rx_mon_stats = &pdev->rx_mon_stats; - - DP_PRINT_STATS("PDEV Rx Monitor Stats:\n"); - - DP_PRINT_STATS("status_ppdu_compl_cnt = %d", - rx_mon_stats->status_ppdu_compl); - DP_PRINT_STATS("status_ppdu_start_cnt = %d", - rx_mon_stats->status_ppdu_start); - DP_PRINT_STATS("status_ppdu_end_cnt = %d", - rx_mon_stats->status_ppdu_end); - DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d", - rx_mon_stats->status_ppdu_start_mis); - DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d", - rx_mon_stats->status_ppdu_end_mis); - DP_PRINT_STATS("status_ppdu_done_cnt = %d", - rx_mon_stats->status_ppdu_done); - DP_PRINT_STATS("dest_ppdu_done_cnt = %d", - rx_mon_stats->dest_ppdu_done); - DP_PRINT_STATS("dest_mpdu_done_cnt = %d", - rx_mon_stats->dest_mpdu_done); - DP_PRINT_STATS("tlv_tag_status_err_cnt = %u", - rx_mon_stats->tlv_tag_status_err); - DP_PRINT_STATS("mon status DMA not done WAR count= %u", - rx_mon_stats->status_buf_done_war); - DP_PRINT_STATS("dest_mpdu_drop_cnt = %d", - rx_mon_stats->dest_mpdu_drop); - DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d", - rx_mon_stats->dup_mon_linkdesc_cnt); - DP_PRINT_STATS("dup_mon_buf_cnt = %d", - rx_mon_stats->dup_mon_buf_cnt); - DP_PRINT_STATS("mon_rx_buf_reaped = %u", - rx_mon_stats->mon_rx_bufs_reaped_dest); - DP_PRINT_STATS("mon_rx_buf_replenished = %u", - rx_mon_stats->mon_rx_bufs_replenished_dest); - DP_PRINT_STATS("ppdu_id_mismatch = %u", - rx_mon_stats->ppdu_id_mismatch); - DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d", - rx_mon_stats->ppdu_id_match); - DP_PRINT_STATS("ppdus dropped frm status ring = %d", - rx_mon_stats->status_ppdu_drop); - DP_PRINT_STATS("ppdus dropped frm dest ring = %d", - rx_mon_stats->dest_ppdu_drop); - stat_ring_ppdu_ids = - (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); - dest_ring_ppdu_ids = - (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); - - if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) - DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n"); - - 
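	/*
	 * rx_mon_stats keeps the last MAX_PPDU_ID_HIST PPDU IDs per ring in a
	 * circular buffer.  The current write index and both histories are
	 * snapshotted under mon_lock; the dump then walks the buffer starting
	 * just past that index, wrapping with a power-of-two mask, so the
	 * entries come out oldest first.
	 */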
qdf_spin_lock_bh(&pdev->mon_lock); - idx = rx_mon_stats->ppdu_id_hist_idx; - qdf_mem_copy(stat_ring_ppdu_ids, - rx_mon_stats->stat_ring_ppdu_id_hist, - sizeof(uint32_t) * MAX_PPDU_ID_HIST); - qdf_mem_copy(dest_ring_ppdu_ids, - rx_mon_stats->dest_ring_ppdu_id_hist, - sizeof(uint32_t) * MAX_PPDU_ID_HIST); - qdf_spin_unlock_bh(&pdev->mon_lock); - - DP_PRINT_STATS("PPDU Id history:"); - DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids"); - for (i = 0; i < MAX_PPDU_ID_HIST; i++) { - idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1); - DP_PRINT_STATS("%*u\t%*u", 16, - rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16, - rx_mon_stats->dest_ring_ppdu_id_hist[idx]); - } - qdf_mem_free(stat_ring_ppdu_ids); - qdf_mem_free(dest_ring_ppdu_ids); - DP_PRINT_STATS("mon_rx_dest_stuck = %d", - rx_mon_stats->mon_rx_dest_stuck); -} - void dp_print_soc_tx_stats(struct dp_soc *soc) { diff --git a/dp/wifi3.0/dp_tx.c b/dp/wifi3.0/dp_tx.c index 5315511945..ff8d871ede 100644 --- a/dp/wifi3.0/dp_tx.c +++ b/dp/wifi3.0/dp_tx.c @@ -44,6 +44,9 @@ #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR #include #endif +#ifdef WIFI_MONITOR_SUPPORT +#include +#endif /* Flag to skip CCE classify when mesh or tid override enabled */ #define DP_TX_SKIP_CCE_CLASSIFY \ @@ -3924,7 +3927,7 @@ dp_tx_comp_process_desc(struct dp_soc *soc, dp_tx_enh_unmap(soc, desc); if (QDF_STATUS_SUCCESS == - dp_tx_add_to_comp_queue(soc, desc, ts, peer)) { + monitor_tx_add_to_comp_queue(soc, desc, ts, peer)) { return; } diff --git a/dp/wifi3.0/dp_wdi_event.c b/dp/wifi3.0/dp_wdi_event.c index 34631ba94d..17d3d09ea0 100644 --- a/dp/wifi3.0/dp_wdi_event.c +++ b/dp/wifi3.0/dp_wdi_event.c @@ -19,6 +19,10 @@ #include "dp_internal.h" #include "qdf_mem.h" /* qdf_mem_malloc,free */ +#ifdef WIFI_MONITOR_SUPPORT +#include "dp_htt.h" +#include +#endif #ifdef WDI_EVENT_ENABLE void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id) @@ -192,7 +196,7 @@ dp_wdi_event_sub( return -EINVAL; } - dp_set_pktlog_wifi3(txrx_pdev, event, true); + monitor_set_pktlog_wifi3(txrx_pdev, event, true); event_index = event - WDI_EVENT_BASE; wdi_sub = txrx_pdev->wdi_event_list[event_index]; @@ -254,7 +258,7 @@ dp_wdi_event_unsub( return -EINVAL; } - dp_set_pktlog_wifi3(txrx_pdev, event, false); + monitor_set_pktlog_wifi3(txrx_pdev, event, false); if (!event_cb_sub->priv.prev) { txrx_pdev->wdi_event_list[event_index] = event_cb_sub->priv.next; diff --git a/dp/wifi3.0/monitor/dp_mon.c b/dp/wifi3.0/monitor/dp_mon.c index 830bbbe8d7..49dd6a3023 100644 --- a/dp/wifi3.0/monitor/dp_mon.c +++ b/dp/wifi3.0/monitor/dp_mon.c @@ -21,9 +21,33 @@ #include #include #include "htt_ppdu_stats.h" +#include "dp_cal_client_api.h" +#if defined(DP_CON_MON) +#ifndef REMOVE_PKT_LOG +#include +#include +#endif +#endif +#ifdef FEATURE_PERPKT_INFO +#include "dp_ratetable.h" +#endif #define RNG_ERR "SRNG setup failed for" #define mon_init_err(params...) 
QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params) +#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16 +#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE +#define HTT_SHIFT_UPPER_TIMESTAMP 32 +#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000 +#define DP_INTR_POLL_TIMER_MS 5 +#define INVALID_FREE_BUFF 0xffffffff + +#ifdef WLAN_RX_PKT_CAPTURE_ENH +#include "dp_rx_mon_feature.h" +#endif /* WLAN_RX_PKT_CAPTURE_ENH */ + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +#include "dp_tx_capture.h" +#endif QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng, int ring_type, uint32_t num_entries, @@ -34,13 +58,19 @@ QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng, void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng, int ring_type, int ring_num); -QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev); -void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev); -void dp_neighbour_peers_detach(struct dp_pdev *pdev); -void dp_pktlogmod_exit(struct dp_pdev *handle); QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev, uint8_t delayed_replenish); +#ifndef WLAN_TX_PKT_CAPTURE_ENH +static inline void +dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev, + void *data, + uint32_t ppdu_id, + uint32_t size) +{ +} +#endif + #if !defined(DISABLE_MON_CONFIG) /** * dp_mon_rings_deinit() - Deinitialize monitor rings @@ -607,6 +637,3933 @@ dp_config_debug_sniffer(struct dp_pdev *pdev, int val) return status; } +static void dp_flush_monitor_rings(struct dp_soc *soc) +{ + struct dp_pdev *pdev = soc->pdev_list[0]; + hal_soc_handle_t hal_soc = soc->hal_soc; + uint32_t lmac_id; + uint32_t hp, tp; + uint8_t dp_intr_id; + int budget; + void *mon_dst_srng; + + /* Reset monitor filters before reaping the ring*/ + qdf_spin_lock_bh(&pdev->mon_lock); + dp_mon_filter_reset_mon_mode(pdev); + if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) + dp_info("failed to reset monitor filters"); + qdf_spin_unlock_bh(&pdev->mon_lock); + + if (pdev->mon_chan_band == REG_BAND_UNKNOWN) + return; + + lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band]; + if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID)) + return; + + dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id]; + mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id); + + /* reap full ring */ + budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx); + + hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp); + dp_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp); + + dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget); + + hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp); + dp_info("After reap: Monitor DST ring HP %u TP %u", hp, tp); +} + +#if !defined(DISABLE_MON_CONFIG) +/** + * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings + * @soc: soc handle + * @pdev: physical device handle + * @mac_id: ring number + * @mac_for_pdev: mac_id + * + * Return: non-zero for failure, zero for success + */ +static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc, + struct dp_pdev *pdev, + int mac_id, + int mac_for_pdev) +{ + QDF_STATUS status = QDF_STATUS_SUCCESS; + + if (soc->wlan_cfg_ctx->rxdma1_enable) { + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_buf_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_BUF); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon buf ring"); + return status; + } + + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_dst_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_DST); + + if 
(status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon dst ring"); + return status; + } + + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon status ring"); + return status; + } + + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_desc_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_DESC); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng message for Rxdma mon desc ring"); + return status; + } + } else { + status = htt_srng_setup(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS); + + if (status != QDF_STATUS_SUCCESS) { + dp_err("Failed to send htt srng setup message for Rxdma mon status ring"); + return status; + } + } + + return status; +} +#endif + +/* MCL specific functions */ +#if defined(DP_CON_MON) + +/* + * dp_service_mon_rings()- service monitor rings + * @soc: soc dp handle + * @quota: number of ring entry that can be serviced + * + * Return: None + * + */ +static void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota) +{ + int ring = 0, work_done; + struct dp_pdev *pdev = NULL; + + for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) { + pdev = dp_get_pdev_for_lmac_id(soc, ring); + if (!pdev) + continue; + work_done = dp_mon_process(soc, NULL, ring, quota); + + dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings", + work_done); + } +} +#endif + +/** + * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer + * ring based on target + * @soc: soc handle + * @mac_for_pdev: WIN- pdev_id, MCL- mac id + * @pdev: physical device handle + * @ring_num: mac id + * @htt_tlv_filter: tlv filter + * + * Return: zero on success, non-zero on failure + */ +static inline QDF_STATUS +dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev, + struct dp_pdev *pdev, uint8_t ring_num, + struct htt_rx_ring_tlv_filter htt_tlv_filter) +{ + QDF_STATUS status; + + if (soc->wlan_cfg_ctx->rxdma1_enable) + status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + soc->rxdma_mon_buf_ring[ring_num] + .hal_srng, + RXDMA_MONITOR_BUF, + RX_MONITOR_BUFFER_SIZE, + &htt_tlv_filter); + else + status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, + pdev->rx_mac_buf_ring[ring_num] + .hal_srng, + RXDMA_BUF, RX_DATA_BUFFER_SIZE, + &htt_tlv_filter); + + return status; +} + +/* + * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode + * @soc_hdl: datapath soc handle + * @pdev_id: physical device instance id + * + * Return: virtual interface id + */ +static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (qdf_unlikely(!pdev || !pdev->monitor_vdev)) + return -EINVAL; + + return pdev->monitor_vdev->vdev_id; +} + +/* + * dp_peer_tx_init() – Initialize receive TID state + * @pdev: Datapath pdev + * @peer: Datapath peer + * + */ +void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer) +{ + dp_peer_tid_queue_init(peer); + dp_peer_update_80211_hdr(peer->vdev, peer); +} + +/* + * dp_peer_tx_cleanup() – Deinitialize receive TID state + * @vdev: Datapath vdev + * @peer: Datapath peer + * + */ +static inline void +dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer) +{ + 
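	/* Drop the per-TID ppdu stats queues set up by dp_peer_tx_init() */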
dp_peer_tid_queue_cleanup(peer); +} + +#ifdef FEATURE_PERPKT_INFO +#ifndef WLAN_TX_PKT_CAPTURE_ENH +/* + * dp_deliver_mgmt_frm: Process + * @pdev: DP PDEV handle + * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * + * return: void + */ +void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) +{ + if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc, + nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } else { + if (!pdev->bpr_enable) + qdf_nbuf_free(nbuf); + } +} +#endif +/* + * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process + * htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * @pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv + * @length: tlv_length + * + * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller + */ +QDF_STATUS +dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev, + qdf_nbuf_t tag_buf, + uint32_t ppdu_id) +{ + uint32_t *nbuf_ptr; + uint8_t trim_size; + size_t head_size; + struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info; + uint32_t *msg_word; + uint32_t tsf_hdr; + + if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) && + (!pdev->bpr_enable) && (!pdev->tx_capture_enabled)) + return QDF_STATUS_SUCCESS; + + /* + * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t + */ + msg_word = (uint32_t *)qdf_nbuf_data(tag_buf); + msg_word = msg_word + 2; + tsf_hdr = *msg_word; + + trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf + + HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) - + qdf_nbuf_data(tag_buf)); + + if (!qdf_nbuf_pull_head(tag_buf, trim_size)) + return QDF_STATUS_SUCCESS; + + qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) - + pdev->mgmtctrl_frm_info.mgmt_buf_len); + + if (pdev->tx_capture_enabled) { + head_size = sizeof(struct cdp_tx_mgmt_comp_info); + if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) { + qdf_err("Fail to get headroom h_sz %zu h_avail %d\n", + head_size, qdf_nbuf_headroom(tag_buf)); + qdf_assert_always(0); + return QDF_STATUS_E_NOMEM; + } + ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *) + qdf_nbuf_push_head(tag_buf, head_size); + qdf_assert_always(ptr_mgmt_comp_info); + ptr_mgmt_comp_info->ppdu_id = ppdu_id; + ptr_mgmt_comp_info->is_sgen_pkt = true; + ptr_mgmt_comp_info->tx_tsf = tsf_hdr; + } else { + head_size = sizeof(ppdu_id); + nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size); + *nbuf_ptr = ppdu_id; + } + if (pdev->bpr_enable) { + dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc, + tag_buf, HTT_INVALID_PEER, + WDI_NO_VAL, pdev->pdev_id); + } + + dp_deliver_mgmt_frm(pdev, tag_buf); + + return QDF_STATUS_E_ALREADY; +} +#endif + +/* + * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv + * bitmap for sniffer mode + * @bitmap: received bitmap + * + * Return: expected bitmap value, returns zero if doesn't match with + * either 64-bit Tx window or 256-bit window tlv bitmap + */ +int +dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap) +{ + if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64)) + return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64; + else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256)) + return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256; + + return 0; +} + +#ifdef FEATURE_PERPKT_INFO +/* + * dp_peer_find_by_id_valid - check if peer exists for given id + * @soc: core DP soc context + * @peer_id: peer id from peer object can be retrieved + * + * Return: true if peer exists of false otherwise + */ + +static +bool dp_peer_find_by_id_valid(struct dp_soc 
*soc, uint16_t peer_id) +{ + struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, + DP_MOD_ID_HTT); + + if (peer) { + /* + * Decrement the peer ref which is taken as part of + * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled + */ + dp_peer_unref_delete(peer, DP_MOD_ID_HTT); + + return true; + } + + return false; +} + +/* + * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats. + * @peer: Datapath peer handle + * @ppdu: User PPDU Descriptor + * @cur_ppdu_id: PPDU_ID + * + * Return: None + * + * on Tx data frame, we may get delayed ba set + * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we + * request Block Ack Request(BAR). Successful msdu is received only after Block + * Ack. To populate peer stats we need successful msdu(data frame). + * So we hold the Tx data stats on delayed_ba for stats update. + */ +static void +dp_peer_copy_delay_stats(struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu, + uint32_t cur_ppdu_id) +{ + struct dp_pdev *pdev; + struct dp_vdev *vdev; + + if (peer->last_delayed_ba) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]", + peer->last_delayed_ba_ppduid, cur_ppdu_id); + vdev = peer->vdev; + if (vdev) { + pdev = vdev->pdev; + pdev->stats.cdp_delayed_ba_not_recev++; + } + } + + peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size; + peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc; + peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re; + peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf; + peer->delayed_ba_ppdu_stats.bw = ppdu->bw; + peer->delayed_ba_ppdu_stats.nss = ppdu->nss; + peer->delayed_ba_ppdu_stats.gi = ppdu->gi; + peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; + peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc; + peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; + peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast; + peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast; + peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl; + peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl; + peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm; + + peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start; + peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones; + peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast; + + peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos; + peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id; + + peer->last_delayed_ba = true; + + ppdu->debug_copied = true; +} + +/* + * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats. + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * + * Return: None + * + * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info + * from Tx BAR frame not required to populate peer stats. + * But we need successful MPDU and MSDU to update previous + * transmitted Tx data frame. Overwrite ppdu stats with the previous + * stored ppdu stats. 
+ */ +static void +dp_peer_copy_stats_to_bar(struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu) +{ + ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size; + ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc; + ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re; + ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf; + ppdu->bw = peer->delayed_ba_ppdu_stats.bw; + ppdu->nss = peer->delayed_ba_ppdu_stats.nss; + ppdu->gi = peer->delayed_ba_ppdu_stats.gi; + ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; + ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc; + ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; + ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast; + ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast; + ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl; + ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl; + ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm; + + ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start; + ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones; + ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast; + + ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos; + ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id; + + peer->last_delayed_ba = false; + + ppdu->debug_copied = true; +} + +/* + * dp_tx_rate_stats_update() - Update rate per-peer statistics + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * + * Return: None + */ +static void +dp_tx_rate_stats_update(struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu) +{ + uint32_t ratekbps = 0; + uint64_t ppdu_tx_rate = 0; + uint32_t rix; + uint16_t ratecode = 0; + + if (!peer || !ppdu) + return; + + if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) + return; + + ratekbps = dp_getrateindex(ppdu->gi, + ppdu->mcs, + ppdu->nss, + ppdu->preamble, + ppdu->bw, + &rix, + &ratecode); + + DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps); + + if (!ratekbps) + return; + + /* Calculate goodput in non-training period + * In training period, don't do anything as + * pending pkt is send as goodput. + */ + if ((!peer->bss_peer) && (!ppdu->sa_is_training)) { + ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) * + (CDP_PERCENT_MACRO - ppdu->current_rate_per)); + } + ppdu->rix = rix; + ppdu->tx_ratekbps = ratekbps; + ppdu->tx_ratecode = ratecode; + peer->stats.tx.avg_tx_rate = + dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps); + ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate); + DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate); + + if (peer->vdev) { + /* + * In STA mode: + * We get ucast stats as BSS peer stats. + * + * In AP mode: + * We get mcast stats as BSS peer stats. + * We get ucast stats as assoc peer stats. 
+ */ + if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) { + peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps; + peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs; + } else { + peer->vdev->stats.tx.last_tx_rate = ratekbps; + peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs; + } + } +} + +/* + * dp_tx_stats_update() - Update per-peer statistics + * @pdev: Datapath pdev handle + * @peer: Datapath peer handle + * @ppdu: PPDU Descriptor + * @ack_rssi: RSSI of last ack received + * + * Return: None + */ +static void +dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer, + struct cdp_tx_completion_ppdu_user *ppdu, + uint32_t ack_rssi) +{ + uint8_t preamble, mcs; + uint16_t num_msdu; + uint16_t num_mpdu; + uint16_t mpdu_tried; + uint16_t mpdu_failed; + + preamble = ppdu->preamble; + mcs = ppdu->mcs; + num_msdu = ppdu->num_msdu; + num_mpdu = ppdu->mpdu_success; + mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast; + mpdu_failed = mpdu_tried - num_mpdu; + + /* If the peer statistics are already processed as part of + * per-MSDU completion handler, do not process these again in per-PPDU + * indications + */ + if (pdev->soc->process_tx_status) + return; + + if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) { + /* + * All failed mpdu will be retried, so incrementing + * retries mpdu based on mpdu failed. Even for + * ack failure i.e for long retries we get + * mpdu failed equal mpdu tried. + */ + DP_STATS_INC(peer, tx.retries, mpdu_failed); + DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); + return; + } + + if (ppdu->is_ppdu_cookie_valid) + DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1); + + if (ppdu->mu_group_id <= MAX_MU_GROUP_ID && + ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) { + if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1)))) + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "mu_group_id out of bound!!\n"); + else + DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id], + (ppdu->user_pos + 1)); + } + + if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA || + ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) { + DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones); + DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start); + switch (ppdu->ru_tones) { + case RU_26: + DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_52: + DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_106: + DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_242: + DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_484: + DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried, + mpdu_tried); + break; + case RU_996: + DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu, + 
num_mpdu); + DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried, + mpdu_tried); + break; + } + } + + /* + * All failed mpdu will be retried, so incrementing + * retries mpdu based on mpdu failed. Even for + * ack failure i.e for long retries we get + * mpdu failed equal mpdu tried. + */ + DP_STATS_INC(peer, tx.retries, mpdu_failed); + DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus); + + DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu, + num_msdu); + DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu, + num_mpdu); + DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried, + mpdu_tried); + + DP_STATS_INC_PKT(peer, tx.comp_pkt, + num_msdu, (ppdu->success_bytes + + ppdu->retry_bytes + ppdu->failed_bytes)); + DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate); + DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu); + DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu); + DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu); + if (ppdu->tid < CDP_DATA_TID_MAX) + DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)], + num_msdu); + DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc); + DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc); + if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid) + DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi); + + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_A))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11A) && (preamble == DOT11_N))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu, + ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, + tx.pkt_type[preamble].mcs_count[mcs], num_msdu, + ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX))); + DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu); + DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu)); + DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct); + + dp_peer_stats_notify(pdev, peer); + +#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE + dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, + &peer->stats, ppdu->peer_id, + UPDATE_PEER_STATS, pdev->pdev_id); +#endif +} +#endif + +#ifdef FEATURE_PERPKT_INFO +/* + * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU, + * if a new peer id arrives in a PPDU + * pdev: DP pdev handle + * @peer_id : peer unique identifier + * @ppdu_info: per ppdu tlv structure + * + * return:user index to be populated + */ +static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev, + uint16_t peer_id, + struct ppdu_info *ppdu_info) +{ + uint8_t user_index = 0; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + + 
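	/*
	 * Scan the users already recorded for this PPDU: if peer_id matches an
	 * existing entry, reuse that slot; otherwise grow last_user and hand
	 * back a fresh index, asserting it stays within max_users (at most 8
	 * users per PPDU).
	 */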
ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + while ((user_index + 1) <= ppdu_info->last_user) { + ppdu_user_desc = &ppdu_desc->user[user_index]; + if (ppdu_user_desc->peer_id != peer_id) { + user_index++; + continue; + } else { + /* Max users possible is 8 so user array index should + * not exceed 7 + */ + qdf_assert_always(user_index <= (ppdu_desc->max_users - 1)); + return user_index; + } + } + + ppdu_info->last_user++; + /* Max users possible is 8 so last user should not exceed 8 */ + qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users); + return ppdu_info->last_user - 1; +} + +/* + * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv + * pdev: DP pdev handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void +dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t frame_type; + uint16_t frame_ctrl; + uint16_t freq; + struct dp_soc *soc = NULL; + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + uint64_t ppdu_start_timestamp; + uint32_t *start_tag_buf; + + start_tag_buf = tag_buf; + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + ppdu_desc->ppdu_id = ppdu_info->ppdu_id; + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID); + ppdu_info->sched_cmdid = + HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf); + ppdu_desc->num_users = + HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf); + + qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE); + frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf); + ppdu_desc->htt_frame_type = frame_type; + + frame_ctrl = ppdu_desc->frame_ctrl; + + ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id; + + switch (frame_type) { + case HTT_STATS_FTYPE_TIDQ_DATA_SU: + case HTT_STATS_FTYPE_TIDQ_DATA_MU: + case HTT_STATS_FTYPE_SGEN_QOS_NULL: + /* + * for management packet, frame type come as DATA_SU + * need to check frame_ctrl before setting frame_type + */ + if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL) + ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; + else + ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA; + break; + case HTT_STATS_FTYPE_SGEN_MU_BAR: + case HTT_STATS_FTYPE_SGEN_BAR: + ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR; + break; + default: + ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL; + break; + } + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US); + ppdu_desc->tx_duration = *tag_buf; + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US); + ppdu_desc->ppdu_start_timestamp = *tag_buf; + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE); + freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf); + if (freq != ppdu_desc->channel) { + soc = pdev->soc; + ppdu_desc->channel = freq; + pdev->operating_channel.freq = freq; + if (soc && soc->cdp_soc.ol_ops->freq_to_channel) + pdev->operating_channel.num = + soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc, + pdev->pdev_id, + freq); + + if (soc && soc->cdp_soc.ol_ops->freq_to_band) + pdev->operating_channel.band = + soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc, + pdev->pdev_id, + freq); + } + + ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf); + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM); + ppdu_desc->phy_ppdu_tx_time_us = + 
HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf); + ppdu_desc->beam_change = + HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf); + ppdu_desc->doppler = + HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf); + ppdu_desc->spatial_reuse = + HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf); + + dp_tx_capture_htt_frame_counter(pdev, frame_type); + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US); + ppdu_start_timestamp = *tag_buf; + ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp << + HTT_SHIFT_UPPER_TIMESTAMP) & + HTT_MASK_UPPER_TIMESTAMP); + + ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + + ppdu_desc->tx_duration; + /* Ack time stamp is same as end time stamp*/ + ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; + + ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp + + ppdu_desc->tx_duration; + + ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp; + ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp; + ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration; + + /* Ack time stamp is same as end time stamp*/ + ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp; + + tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR); + ppdu_desc->bss_color = + HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common + * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_common_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + struct dp_peer *peer; + struct dp_vdev *vdev; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = + dp_get_ppdu_info_user_index(pdev, + peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + + ppdu_desc->vdev_id = + HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf); + + ppdu_user_desc->peer_id = peer_id; + + tag_buf++; + + if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) { + ppdu_user_desc->delayed_ba = 1; + ppdu_desc->delayed_ba = 1; + } + + if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) { + ppdu_user_desc->is_mcast = true; + ppdu_user_desc->mpdu_tried_mcast = + HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); + ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast; + } else { + ppdu_user_desc->mpdu_tried_ucast = + HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf); + } + + ppdu_user_desc->is_seq_num_valid = + HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf); + tag_buf++; + + ppdu_user_desc->qos_ctrl = + HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf); + ppdu_user_desc->frame_ctrl = + HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf); + ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl; + + if (ppdu_user_desc->delayed_ba) + ppdu_user_desc->mpdu_success = 0; + + tag_buf += 3; + + if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) { + ppdu_user_desc->ppdu_cookie = + HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf); + 
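As a side note on the common TLV handling above: the 64-bit PPDU start timestamp is stitched together from the lower and upper 32-bit TLV words, and the end and ack timestamps are simply start plus tx_duration. A minimal sketch of that assembly, assuming a 32-bit shift and an upper-32 mask (the patch expresses these as HTT_SHIFT_UPPER_TIMESTAMP / HTT_MASK_UPPER_TIMESTAMP):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tstamp_l32 = 0x89abcdefu;  /* START_TSTMP_L32_US word */
	uint32_t tstamp_u32 = 0x00000012u;  /* START_TSTMP_U32_US word */
	uint32_t tx_duration_us = 1500;     /* FES_DUR_US word */
	uint64_t start, end;

	start = tstamp_l32;
	start |= ((uint64_t)tstamp_u32 << 32) & 0xffffffff00000000ull;

	/* end and ack timestamps are both derived as start + duration */
	end = start + tx_duration_us;

	printf("start=0x%llx end=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}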
ppdu_user_desc->is_ppdu_cookie_valid = 1; + } + + /* returning earlier causes other feilds unpopulated */ + if (peer_id == DP_SCAN_PEER_ID) { + vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id, + DP_MOD_ID_TX_PPDU_STATS); + if (!vdev) + return; + qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS); + } else { + peer = dp_peer_get_ref_by_id(pdev->soc, peer_id, + DP_MOD_ID_TX_PPDU_STATS); + if (!peer) { + /* + * fw sends peer_id which is about to removed but + * it was already removed in host. + * eg: for disassoc, fw send ppdu stats + * with peer id equal to previously associated + * peer's peer_id but it was removed + */ + vdev = dp_vdev_get_ref_by_id(pdev->soc, + ppdu_desc->vdev_id, + DP_MOD_ID_TX_PPDU_STATS); + if (!vdev) + return; + qdf_mem_copy(ppdu_user_desc->mac_addr, + vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE); + dp_vdev_unref_delete(pdev->soc, vdev, + DP_MOD_ID_TX_PPDU_STATS); + return; + } + qdf_mem_copy(ppdu_user_desc->mac_addr, + peer->mac_addr.raw, QDF_MAC_ADDR_SIZE); + dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); + } +} + +/** + * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv + * @pdev: DP pdev handle + * @tag_buf: T2H message buffer carrying the user rate TLV + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + struct dp_vdev *vdev; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = + dp_get_ppdu_info_user_index(pdev, + peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + if (peer_id == DP_SCAN_PEER_ID) { + vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id, + DP_MOD_ID_TX_PPDU_STATS); + if (!vdev) + return; + dp_vdev_unref_delete(pdev->soc, vdev, + DP_MOD_ID_TX_PPDU_STATS); + } + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->tid = + HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf); + + tag_buf += 1; + + ppdu_user_desc->user_pos = + HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf); + ppdu_user_desc->mu_group_id = + HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf); + + tag_buf += 1; + + ppdu_user_desc->ru_start = + HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf); + ppdu_user_desc->ru_tones = + (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) - + HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1; + ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones; + + tag_buf += 2; + + ppdu_user_desc->ppdu_type = + HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf); + + tag_buf++; + ppdu_user_desc->tx_rate = *tag_buf; + + ppdu_user_desc->ltf_size = + HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf); + ppdu_user_desc->stbc = + HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf); + ppdu_user_desc->he_re = + HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf); + ppdu_user_desc->txbf = + HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf); + ppdu_user_desc->bw = + HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2; + ppdu_user_desc->nss = 
HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf); + ppdu_desc->usr_nss_sum += ppdu_user_desc->nss; + ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf); + ppdu_user_desc->preamble = + HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf); + ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf); + ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf); + ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process + * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf = + (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf; + + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->start_seq = dp_stats_buf->start_seq; + qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, + sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); + + dp_process_ppdu_stats_update_failed_bitmap(pdev, + (void *)ppdu_user_desc, + ppdu_info->ppdu_id, + size); +} + +/* + * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process + * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv + * soc: DP SOC handle + * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf = + (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf; + + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->start_seq = dp_stats_buf->start_seq; + qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap, + sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); + + dp_process_ppdu_stats_update_failed_bitmap(pdev, + (void *)ppdu_user_desc, + ppdu_info->ppdu_id, + size); +} + +/* + * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process + * htt_ppdu_stats_user_cmpltn_common_tlv + * soc: DP SOC handle + * @tag_buf: buffer containing 
the tlv htt_ppdu_stats_user_cmpltn_common_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_cmpltn_common_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint8_t bw_iter; + htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf = + (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->completion_status = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET( + *tag_buf); + + ppdu_user_desc->tid = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf); + + tag_buf++; + if (qdf_likely(ppdu_user_desc->completion_status == + HTT_PPDU_STATS_USER_STATUS_OK)) { + ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi; + ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi; + ppdu_user_desc->ack_rssi_valid = 1; + } else { + ppdu_user_desc->ack_rssi_valid = 0; + } + + tag_buf++; + + ppdu_user_desc->mpdu_success = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf); + + ppdu_user_desc->mpdu_failed = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) - + ppdu_user_desc->mpdu_success; + + tag_buf++; + + ppdu_user_desc->long_retries = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf); + + ppdu_user_desc->short_retries = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf); + ppdu_user_desc->retry_msdus = + ppdu_user_desc->long_retries + ppdu_user_desc->short_retries; + + ppdu_user_desc->is_ampdu = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf); + ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu; + + ppdu_desc->resp_type = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf); + ppdu_desc->mprot_type = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf); + ppdu_desc->rts_success = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf); + ppdu_desc->rts_failure = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf); + ppdu_user_desc->pream_punct = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf); + + ppdu_info->compltn_common_tlv++; + + /* + * MU BAR may send request to n users but we may received ack only from + * m users. 
To have count of number of users respond back, we have a + * separate counter bar_num_users per PPDU that get increment for every + * htt_ppdu_stats_user_cmpltn_common_tlv + */ + ppdu_desc->bar_num_users++; + + tag_buf++; + for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) { + ppdu_user_desc->rssi_chain[bw_iter] = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf); + tag_buf++; + } + + ppdu_user_desc->sa_tx_antenna = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf); + + tag_buf++; + ppdu_user_desc->sa_is_training = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf); + if (ppdu_user_desc->sa_is_training) { + ppdu_user_desc->sa_goodput = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf); + } + + tag_buf++; + for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) { + ppdu_user_desc->sa_max_rates[bw_iter] = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]); + } + + tag_buf += CDP_NUM_SA_BW; + ppdu_user_desc->current_rate_per = + HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process + * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf = + (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + struct cdp_tx_completion_ppdu *ppdu_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; + qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, + sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS); + ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32; +} + +/* + * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process + * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf = + (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + struct cdp_tx_completion_ppdu *ppdu_desc; + uint8_t curr_user_index = 0; + uint16_t peer_id; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + 
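The enq_bitmap and ba_bitmap fields copied by these handlers are plain arrays of 32-bit words (CDP_BA_64_BIT_MAP_SIZE_DWORDS or CDP_BA_256_BIT_MAP_SIZE_DWORDS entries), which is why ba_size is set to dwords * 32. A minimal sketch of testing one MPDU position in such a bitmap; the helper name is hypothetical and this is not the driver's own routine:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BA_64_DWORDS 2           /* 2 * 32 bits = 64-frame window */

static bool sketch_bitmap_test(const uint32_t *bitmap, uint16_t pos)
{
	/* word index = pos / 32, bit index = pos % 32 */
	return (bitmap[pos >> 5] >> (pos & 0x1f)) & 1u;
}

int main(void)
{
	uint32_t ba_bitmap[SKETCH_BA_64_DWORDS] = { 0x00000005u, 0x80000000u };

	printf("pos0=%d pos1=%d pos63=%d\n",
	       sketch_bitmap_test(ba_bitmap, 0),
	       sketch_bitmap_test(ba_bitmap, 1),
	       sketch_bitmap_test(ba_bitmap, 63));
	return 0;
}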
ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + ppdu_user_desc->peer_id = peer_id; + + ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no; + qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap, + sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS); + ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32; +} + +/* + * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process + * htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * pdev: DP PDE handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint16_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf += 2; + peer_id = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf); + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + if (!ppdu_user_desc->ack_ba_tlv) { + ppdu_user_desc->ack_ba_tlv = 1; + } else { + pdev->stats.ack_ba_comes_twice++; + return; + } + + ppdu_user_desc->peer_id = peer_id; + + tag_buf++; + /* not to update ppdu_desc->tid from this TLV */ + ppdu_user_desc->num_mpdu = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf); + + ppdu_user_desc->num_msdu = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf); + + ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu; + + tag_buf++; + ppdu_user_desc->start_seq = + HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET( + *tag_buf); + + tag_buf++; + ppdu_user_desc->success_bytes = *tag_buf; + + /* increase ack ba tlv counter on successful mpdu */ + if (ppdu_user_desc->num_mpdu) + ppdu_info->ack_ba_tlv++; + + if (ppdu_user_desc->ba_size == 0) { + ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq; + ppdu_user_desc->ba_bitmap[0] = 1; + ppdu_user_desc->ba_size = 1; + } +} + +/* + * dp_process_ppdu_stats_user_common_array_tlv: Process + * htt_ppdu_stats_user_common_array_tlv + * pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void dp_process_ppdu_stats_user_common_array_tlv( + struct dp_pdev *pdev, uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + uint32_t peer_id; + struct cdp_tx_completion_ppdu *ppdu_desc; + struct cdp_tx_completion_ppdu_user *ppdu_user_desc; + uint8_t curr_user_index = 0; + struct htt_tx_ppdu_stats_info *dp_stats_buf; + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + + ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + + tag_buf++; + dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf; + tag_buf += 3; + peer_id = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf); + + if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "Invalid peer"); + return; + } + + curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info); + + ppdu_user_desc = &ppdu_desc->user[curr_user_index]; + 
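The HTT_..._GET() accessors used throughout these TLV handlers are mask-and-shift extractors over individual 32-bit TLV words, which is why the code steps through the payload with tag_buf++ / tag_buf += n between reads. A minimal sketch of that pattern with a made-up field layout (the real bit positions live in the HTT headers, not here):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: bits 0..15 carry sw_peer_id, bits 16..23 carry tid */
#define SKETCH_SW_PEER_ID_GET(word)	(((word) >> 0) & 0xffffu)
#define SKETCH_TID_NUM_GET(word)	(((word) >> 16) & 0xffu)

int main(void)
{
	uint32_t word = (5u << 16) | 0x002au;	/* tid 5, peer id 0x2a */

	printf("peer_id=0x%x tid=%u\n",
	       (unsigned)SKETCH_SW_PEER_ID_GET(word),
	       (unsigned)SKETCH_TID_NUM_GET(word));
	return 0;
}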
ppdu_user_desc->tlv_bitmap |= (1 << tlv_type); + + ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes; + ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes; + + tag_buf++; + + ppdu_user_desc->success_msdus = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf); + ppdu_user_desc->retry_bytes = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf); + tag_buf++; + ppdu_user_desc->failed_msdus = + HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf); +} + +/* + * dp_process_ppdu_stats_flush_tlv: Process + * htt_ppdu_stats_flush_tlv + * @pdev: DP PDEV handle + * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void +dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + struct ppdu_info *ppdu_info) +{ + struct cdp_tx_completion_ppdu *ppdu_desc; + uint32_t peer_id; + uint8_t tid; + struct dp_peer *peer; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + ppdu_desc->is_flush = 1; + + tag_buf++; + ppdu_desc->drop_reason = *tag_buf; + + tag_buf++; + ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf); + ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf); + ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf); + + tag_buf++; + peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf); + tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf); + + ppdu_desc->num_users = 1; + ppdu_desc->user[0].peer_id = peer_id; + ppdu_desc->user[0].tid = tid; + + ppdu_desc->queue_type = + HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf); + + peer = dp_peer_get_ref_by_id(pdev->soc, peer_id, + DP_MOD_ID_TX_PPDU_STATS); + if (!peer) + goto add_ppdu_to_sched_list; + + if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) { + DP_STATS_INC(peer, + tx.excess_retries_per_ac[TID_TO_WME_AC(tid)], + ppdu_desc->num_msdu); + } + + dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); + +add_ppdu_to_sched_list: + ppdu_info->done = 1; + TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); + pdev->list_depth--; + TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info, + ppdu_info_list_elem); + pdev->sched_comp_list_depth++; +} + +/** + * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv + * Here we are not going to process the buffer. 
+ * @pdev: DP PDEV handle + * @ppdu_info: per ppdu tlv structure + * + * return:void + */ +static void +dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev, + struct ppdu_info *ppdu_info) +{ + struct cdp_tx_completion_ppdu *ppdu_desc; + struct dp_peer *peer; + uint8_t num_users; + uint8_t i; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + + num_users = ppdu_desc->bar_num_users; + + for (i = 0; i < num_users; i++) { + if (ppdu_desc->user[i].user_pos == 0) { + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { + /* update phy mode for bar frame */ + ppdu_desc->phy_mode = + ppdu_desc->user[i].preamble; + ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs; + break; + } + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) { + ppdu_desc->frame_ctrl = + ppdu_desc->user[i].frame_ctrl; + break; + } + } + } + + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && + ppdu_desc->delayed_ba) { + qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); + + for (i = 0; i < ppdu_desc->num_users; i++) { + struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; + uint64_t start_tsf; + uint64_t end_tsf; + uint32_t ppdu_id; + + ppdu_id = ppdu_desc->ppdu_id; + peer = dp_peer_get_ref_by_id + (pdev->soc, ppdu_desc->user[i].peer_id, + DP_MOD_ID_TX_PPDU_STATS); + /** + * This check is to make sure peer is not deleted + * after processing the TLVs. + */ + if (!peer) + continue; + + delay_ppdu = &peer->delayed_ba_ppdu_stats; + start_tsf = ppdu_desc->ppdu_start_timestamp; + end_tsf = ppdu_desc->ppdu_end_timestamp; + /** + * save delayed ba user info + */ + if (ppdu_desc->user[i].delayed_ba) { + dp_peer_copy_delay_stats(peer, + &ppdu_desc->user[i], + ppdu_id); + peer->last_delayed_ba_ppduid = ppdu_id; + delay_ppdu->ppdu_start_timestamp = start_tsf; + delay_ppdu->ppdu_end_timestamp = end_tsf; + } + ppdu_desc->user[i].peer_last_delayed_ba = + peer->last_delayed_ba; + + dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); + + if (ppdu_desc->user[i].delayed_ba && + !ppdu_desc->user[i].debug_copied) { + QDF_TRACE(QDF_MODULE_ID_TXRX, + QDF_TRACE_LEVEL_INFO_MED, + "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n", + __func__, __LINE__, + ppdu_desc->ppdu_id, + ppdu_desc->bar_ppdu_id, + ppdu_desc->num_users, + i, + ppdu_desc->htt_frame_type); + } + } + } + + /* + * when frame type is BAR and STATS_COMMON_TLV is set + * copy the store peer delayed info to BAR status + */ + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { + for (i = 0; i < ppdu_desc->bar_num_users; i++) { + struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu; + uint64_t start_tsf; + uint64_t end_tsf; + + peer = dp_peer_get_ref_by_id + (pdev->soc, + ppdu_desc->user[i].peer_id, + DP_MOD_ID_TX_PPDU_STATS); + /** + * This check is to make sure peer is not deleted + * after processing the TLVs. 
+ */ + if (!peer) + continue; + + if (ppdu_desc->user[i].completion_status != + HTT_PPDU_STATS_USER_STATUS_OK) { + dp_peer_unref_delete(peer, + DP_MOD_ID_TX_PPDU_STATS); + continue; + } + + delay_ppdu = &peer->delayed_ba_ppdu_stats; + start_tsf = delay_ppdu->ppdu_start_timestamp; + end_tsf = delay_ppdu->ppdu_end_timestamp; + + if (peer->last_delayed_ba) { + dp_peer_copy_stats_to_bar(peer, + &ppdu_desc->user[i]); + ppdu_desc->ppdu_id = + peer->last_delayed_ba_ppduid; + ppdu_desc->ppdu_start_timestamp = start_tsf; + ppdu_desc->ppdu_end_timestamp = end_tsf; + } + ppdu_desc->user[i].peer_last_delayed_ba = + peer->last_delayed_ba; + dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); + } + } + + TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem); + pdev->list_depth--; + TAILQ_INSERT_TAIL(&pdev->sched_comp_ppdu_list, ppdu_info, + ppdu_info_list_elem); + pdev->sched_comp_list_depth++; +} + +/** + * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU + * + * If the TLV length sent as part of PPDU TLV is less that expected size i.e + * size of corresponding data structure, pad the remaining bytes with zeros + * and continue processing the TLVs + * + * @pdev: DP pdev handle + * @tag_buf: TLV buffer + * @tlv_expected_size: Expected size of Tag + * @tlv_len: TLV length received from FW + * + * Return: Pointer to updated TLV + */ +static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev, + uint32_t *tag_buf, + uint16_t tlv_expected_size, + uint16_t tlv_len) +{ + uint32_t *tlv_desc = tag_buf; + + qdf_assert_always(tlv_len != 0); + + if (tlv_len < tlv_expected_size) { + qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size); + qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len); + tlv_desc = pdev->ppdu_tlv_buf; + } + + return tlv_desc; +} + +/** + * dp_process_ppdu_tag(): Function to process the PPDU TLVs + * @pdev: DP pdev handle + * @tag_buf: TLV buffer + * @tlv_len: length of tlv + * @ppdu_info: per ppdu tlv structure + * + * return: void + */ +static void dp_process_ppdu_tag(struct dp_pdev *pdev, + uint32_t *tag_buf, + uint32_t tlv_len, + struct ppdu_info *ppdu_info) +{ + uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf); + uint16_t tlv_expected_size; + uint32_t *tlv_desc; + + switch (tlv_type) { + case HTT_PPDU_STATS_COMMON_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMMON_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc, + ppdu_info); + break; + case HTT_PPDU_STATS_USR_RATE_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc, + ppdu_info); + break; + case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + 
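dp_validate_fix_ppdu_tlv() above guards every fixed-size TLV cast: if the firmware sent fewer bytes than the host structure expects, the TLV is copied into a zeroed scratch buffer and parsed from there, so the missing tail reads as zeros. A minimal standalone sketch of the same idea; the names and the 16-byte size are hypothetical (the driver's scratch buffer is pdev->ppdu_tlv_buf):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_EXPECTED_SIZE 16		/* hypothetical sizeof(expected TLV struct) */

static uint8_t sketch_scratch[SKETCH_EXPECTED_SIZE];

static const uint8_t *sketch_validate_fix_tlv(const uint8_t *tlv, size_t tlv_len)
{
	if (tlv_len >= SKETCH_EXPECTED_SIZE)
		return tlv;			/* long enough: parse in place */

	memset(sketch_scratch, 0, sizeof(sketch_scratch));
	memcpy(sketch_scratch, tlv, tlv_len);	/* short TLV: zero-pad the tail */
	return sketch_scratch;
}

int main(void)
{
	uint8_t short_tlv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	const uint8_t *p = sketch_validate_fix_tlv(short_tlv, sizeof(short_tlv));

	printf("byte7=%u byte15(after padding)=%u\n",
	       p[7], p[SKETCH_EXPECTED_SIZE - 1]);
	return 0;
}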
dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_cmpltn_common_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_cmpltn_common_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV: + tlv_expected_size = + sizeof(htt_ppdu_stats_usr_common_array_tlv_v); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_common_array_tlv( + pdev, tlv_desc, ppdu_info); + break; + case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV: + tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv); + tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf, + tlv_expected_size, tlv_len); + dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc, + ppdu_info); + break; + case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV: + dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info); + break; + default: + break; + } +} + +#ifdef WLAN_ATF_ENABLE +static void +dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, + struct cdp_tx_completion_ppdu *ppdu_desc, + struct cdp_tx_completion_ppdu_user *user) +{ + uint32_t nss_ru_width_sum = 0; + + if (!pdev || !ppdu_desc || !user) + return; + + if (!pdev->dp_atf_stats_enable) + return; + + if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA) + return; + + nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum; + if (!nss_ru_width_sum) + nss_ru_width_sum = 1; + + /** + * For SU-MIMO PPDU phy Tx time is same for the single user. 
+ * For MU-MIMO phy Tx time is calculated per user as below + * user phy tx time = + * Entire PPDU duration * MU Ratio * OFDMA Ratio + * MU Ratio = usr_nss / Sum_of_nss_of_all_users + * OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users + * usr_ru_widt = ru_end – ru_start + 1 + */ + if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) { + user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us; + } else { + user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us * + user->nss * user->ru_tones) / nss_ru_width_sum; + } +} +#else +static void +dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev, + struct cdp_tx_completion_ppdu *ppdu_desc, + struct cdp_tx_completion_ppdu_user *user) +{ +} +#endif + +/** + * dp_ppdu_desc_user_stats_update(): Function to update TX user stats + * @pdev: DP pdev handle + * @ppdu_info: per PPDU TLV descriptor + * + * return: void + */ +void +dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev, + struct ppdu_info *ppdu_info) +{ + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + struct dp_peer *peer = NULL; + uint32_t tlv_bitmap_expected; + uint32_t tlv_bitmap_default; + uint16_t i; + uint32_t num_users; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + + if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR) + ppdu_desc->ppdu_id = ppdu_info->ppdu_id; + + tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; + if (pdev->tx_sniffer_enable || pdev->mcopy_mode || + pdev->tx_capture_enabled) { + if (ppdu_info->is_ampdu) + tlv_bitmap_expected = + dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( + ppdu_info->tlv_bitmap); + } + + tlv_bitmap_default = tlv_bitmap_expected; + + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) { + num_users = ppdu_desc->bar_num_users; + ppdu_desc->num_users = ppdu_desc->bar_num_users; + } else { + num_users = ppdu_desc->num_users; + } + qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users); + + for (i = 0; i < num_users; i++) { + ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu; + ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu; + + peer = dp_peer_get_ref_by_id(pdev->soc, + ppdu_desc->user[i].peer_id, + DP_MOD_ID_TX_PPDU_STATS); + /** + * This check is to make sure peer is not deleted + * after processing the TLVs. + */ + if (!peer) + continue; + + ppdu_desc->user[i].is_bss_peer = peer->bss_peer; + /* + * different frame like DATA, BAR or CTRL has different + * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we + * receive other tlv in-order/sequential from fw. + * Since ACK_BA_STATUS TLV come from Hardware it is + * asynchronous So we need to depend on some tlv to confirm + * all tlv is received for a ppdu. + * So we depend on both SCHED_CMD_STATUS_TLV and + * ACK_BA_STATUS_TLV. for failure packet we won't get + * ACK_BA_STATUS_TLV. 
+ */ + if (!(ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) || + (!(ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) && + (ppdu_desc->user[i].completion_status == + HTT_PPDU_STATS_USER_STATUS_OK))) { + dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); + continue; + } + + /** + * Update tx stats for data frames having Qos as well as + * non-Qos data tid + */ + + if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX || + (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) || + (ppdu_desc->htt_frame_type == + HTT_STATS_FTYPE_SGEN_QOS_NULL) || + ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) && + (ppdu_desc->num_mpdu > 1))) && + (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) { + dp_tx_stats_update(pdev, peer, + &ppdu_desc->user[i], + ppdu_desc->ack_rssi); + dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]); + } + + dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc, + &ppdu_desc->user[i]); + + dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS); + tlv_bitmap_expected = tlv_bitmap_default; + } +} + +#ifndef WLAN_TX_PKT_CAPTURE_ENH + +/** + * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor + * to upper layer + * @pdev: DP pdev handle + * @ppdu_info: per PPDU TLV descriptor + * + * return: void + */ +static +void dp_ppdu_desc_deliver(struct dp_pdev *pdev, + struct ppdu_info *ppdu_info) +{ + struct ppdu_info *s_ppdu_info = NULL; + struct ppdu_info *ppdu_info_next = NULL; + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + qdf_nbuf_t nbuf; + uint32_t time_delta = 0; + bool starved = 0; + bool matched = 0; + bool recv_ack_ba_done = 0; + + if (ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && + ppdu_info->done) + recv_ack_ba_done = 1; + + pdev->last_sched_cmdid = ppdu_info->sched_cmdid; + + s_ppdu_info = TAILQ_FIRST(&pdev->sched_comp_ppdu_list); + + TAILQ_FOREACH_SAFE(s_ppdu_info, &pdev->sched_comp_ppdu_list, + ppdu_info_list_elem, ppdu_info_next) { + if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32) + time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) + + ppdu_info->tsf_l32; + else + time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32; + + if (!s_ppdu_info->done && !recv_ack_ba_done) { + if (time_delta < MAX_SCHED_STARVE) { + dp_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]", + pdev->pdev_id, + s_ppdu_info->ppdu_id, + s_ppdu_info->sched_cmdid, + s_ppdu_info->tlv_bitmap, + s_ppdu_info->tsf_l32, + s_ppdu_info->done); + break; + } + starved = 1; + } + + pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid; + TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, s_ppdu_info, + ppdu_info_list_elem); + pdev->sched_comp_list_depth--; + + nbuf = s_ppdu_info->nbuf; + qdf_assert_always(nbuf); + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(nbuf); + ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap; + + if (starved) { + dp_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n", + ppdu_desc->frame_ctrl, + ppdu_desc->htt_frame_type, + ppdu_desc->tlv_bitmap, + ppdu_desc->user[0].completion_status); + starved = 0; + } + + if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id && + ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid) + matched = 1; + + dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info); + + qdf_mem_free(s_ppdu_info); + + /** + * Deliver PPDU stats only for valid (acked) data + * frames if sniffer mode is not enabled. 
+ * If sniffer mode is enabled, PPDU stats + * for all frames including mgmt/control + * frames should be delivered to upper layer + */ + if (pdev->tx_sniffer_enable || pdev->mcopy_mode) { + dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, + pdev->soc, + nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, + pdev->pdev_id); + } else { + if (ppdu_desc->num_mpdu != 0 && + ppdu_desc->num_users != 0 && + ppdu_desc->frame_ctrl & + HTT_FRAMECTRL_DATATYPE) { + dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, + pdev->soc, + nbuf, HTT_INVALID_PEER, + WDI_NO_VAL, + pdev->pdev_id); + } else { + qdf_nbuf_free(nbuf); + } + } + + if (matched) + break; + } +} + +#endif + +/** + * dp_get_ppdu_desc(): Function to allocate new PPDU status + * desc for new ppdu id + * @pdev: DP pdev handle + * @ppdu_id: PPDU unique identifier + * @tlv_type: TLV type received + * @tsf_l32: timestamp received along with ppdu stats indication header + * @max_users: Maximum user for that particular ppdu + * + * return: ppdu_info per ppdu tlv structure + */ +static +struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id, + uint8_t tlv_type, uint32_t tsf_l32, + uint8_t max_users) +{ + struct ppdu_info *ppdu_info = NULL; + struct ppdu_info *s_ppdu_info = NULL; + struct ppdu_info *ppdu_info_next = NULL; + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + uint32_t size = 0; + struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL; + struct cdp_tx_completion_ppdu_user *tmp_user; + uint32_t time_delta; + + /* + * Find ppdu_id node exists or not + */ + TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list, + ppdu_info_list_elem, ppdu_info_next) { + if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) { + if (ppdu_info->tsf_l32 > tsf_l32) + time_delta = (MAX_TSF_32 - + ppdu_info->tsf_l32) + tsf_l32; + else + time_delta = tsf_l32 - ppdu_info->tsf_l32; + + if (time_delta > WRAP_DROP_TSF_DELTA) { + TAILQ_REMOVE(&pdev->ppdu_info_list, + ppdu_info, ppdu_info_list_elem); + pdev->list_depth--; + pdev->stats.ppdu_wrap_drop++; + tmp_ppdu_desc = + (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + tmp_user = &tmp_ppdu_desc->user[0]; + dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n", + ppdu_info->ppdu_id, + ppdu_info->tsf_l32, + ppdu_info->tlv_bitmap, + tmp_user->completion_status, + ppdu_info->compltn_common_tlv, + ppdu_info->ack_ba_tlv, + ppdu_id, tsf_l32, + tlv_type); + qdf_nbuf_free(ppdu_info->nbuf); + ppdu_info->nbuf = NULL; + qdf_mem_free(ppdu_info); + } else { + break; + } + } + } + + /* + * check if it is ack ba tlv and if it is not there in ppdu info + * list then check it in sched completion ppdu list + */ + if (!ppdu_info && + tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) { + TAILQ_FOREACH(s_ppdu_info, + &pdev->sched_comp_ppdu_list, + ppdu_info_list_elem) { + if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) { + if (s_ppdu_info->tsf_l32 > tsf_l32) + time_delta = (MAX_TSF_32 - + s_ppdu_info->tsf_l32) + + tsf_l32; + else + time_delta = tsf_l32 - + s_ppdu_info->tsf_l32; + if (time_delta < WRAP_DROP_TSF_DELTA) { + ppdu_info = s_ppdu_info; + break; + } + } else { + /* + * ACK BA STATUS TLV comes sequential order + * if we received ack ba status tlv for second + * ppdu and first ppdu is still waiting for + * ACK BA STATUS TLV. Based on fw comment + * we won't receive it tlv later. So we can + * set ppdu info done. 
+ */ + if (s_ppdu_info) + s_ppdu_info->done = 1; + } + } + } + + if (ppdu_info) { + if (ppdu_info->tlv_bitmap & (1 << tlv_type)) { + /** + * if we get tlv_type that is already been processed + * for ppdu, that means we got a new ppdu with same + * ppdu id. Hence Flush the older ppdu + * for MUMIMO and OFDMA, In a PPDU we have + * multiple user with same tlv types. tlv bitmap is + * used to check whether SU or MU_MIMO/OFDMA + */ + if (!(ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) + return ppdu_info; + + ppdu_desc = (struct cdp_tx_completion_ppdu *) + qdf_nbuf_data(ppdu_info->nbuf); + + /** + * apart from ACK BA STATUS TLV rest all comes in order + * so if tlv type not ACK BA STATUS TLV we can deliver + * ppdu_info + */ + if ((tlv_type == + HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) && + (ppdu_desc->htt_frame_type == + HTT_STATS_FTYPE_SGEN_MU_BAR)) + return ppdu_info; + + dp_ppdu_desc_deliver(pdev, ppdu_info); + } else { + return ppdu_info; + } + } + + /** + * Flush the head ppdu descriptor if ppdu desc list reaches max + * threshold + */ + if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) { + ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list); + TAILQ_REMOVE(&pdev->ppdu_info_list, + ppdu_info, ppdu_info_list_elem); + pdev->list_depth--; + pdev->stats.ppdu_drop++; + qdf_nbuf_free(ppdu_info->nbuf); + ppdu_info->nbuf = NULL; + qdf_mem_free(ppdu_info); + } + + size = sizeof(struct cdp_tx_completion_ppdu) + + (max_users * sizeof(struct cdp_tx_completion_ppdu_user)); + + /* + * Allocate new ppdu_info node + */ + ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info)); + if (!ppdu_info) + return NULL; + + ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size, + 0, 4, TRUE); + if (!ppdu_info->nbuf) { + qdf_mem_free(ppdu_info); + return NULL; + } + + ppdu_info->ppdu_desc = + (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf); + qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size); + + if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "No tailroom for HTT PPDU"); + qdf_nbuf_free(ppdu_info->nbuf); + ppdu_info->nbuf = NULL; + ppdu_info->last_user = 0; + qdf_mem_free(ppdu_info); + return NULL; + } + + ppdu_info->ppdu_desc->max_users = max_users; + ppdu_info->tsf_l32 = tsf_l32; + /** + * No lock is needed because all PPDU TLVs are processed in + * same context and this list is updated in same context + */ + TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info, + ppdu_info_list_elem); + pdev->list_depth++; + return ppdu_info; +} + +/** + * dp_htt_process_tlv(): Function to process each PPDU TLVs + * @pdev: DP pdev handle + * @htt_t2h_msg: HTT target to host message + * + * return: ppdu_info per ppdu tlv structure + */ + +static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev, + qdf_nbuf_t htt_t2h_msg) +{ + uint32_t length; + uint32_t ppdu_id; + uint8_t tlv_type; + uint32_t tlv_length, tlv_bitmap_expected; + uint8_t *tlv_buf; + struct ppdu_info *ppdu_info = NULL; + struct cdp_tx_completion_ppdu *ppdu_desc = NULL; + uint8_t max_users = CDP_MU_MAX_USERS; + uint32_t tsf_l32; + + uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg); + + length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word); + + msg_word = msg_word + 1; + ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word); + + msg_word = msg_word + 1; + tsf_l32 = (uint32_t)(*msg_word); + + msg_word = msg_word + 2; + while (length > 0) { + tlv_buf = (uint8_t *)msg_word; + tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word); + tlv_length = 
HTT_STATS_TLV_LENGTH_GET(*msg_word); + if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG)) + pdev->stats.ppdu_stats_counter[tlv_type]++; + + if (tlv_length == 0) + break; + + tlv_length += HTT_TLV_HDR_LEN; + + /** + * Not allocating separate ppdu descriptor for MGMT Payload + * TLV as this is sent as separate WDI indication and it + * doesn't contain any ppdu information + */ + if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) { + pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf; + pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id; + pdev->mgmtctrl_frm_info.mgmt_buf_len = + HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET + (*(msg_word + 1)); + msg_word = + (uint32_t *)((uint8_t *)tlv_buf + tlv_length); + length -= (tlv_length); + continue; + } + + /* + * retrieve max_users if it's USERS_INFO, + * else, it's 1 for COMPLTN_FLUSH, + * else, use CDP_MU_MAX_USERS + */ + if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) { + max_users = + HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1)); + } else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) { + max_users = 1; + } + + ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type, + tsf_l32, max_users); + if (!ppdu_info) + return NULL; + + ppdu_info->ppdu_id = ppdu_id; + ppdu_info->tlv_bitmap |= (1 << tlv_type); + + dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info); + + /** + * Increment pdev level tlv count to monitor + * missing TLVs + */ + pdev->tlv_count++; + ppdu_info->last_tlv_cnt = pdev->tlv_count; + msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length); + length -= (tlv_length); + } + + if (!ppdu_info) + return NULL; + + pdev->last_ppdu_id = ppdu_id; + + tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP; + + if (pdev->tx_sniffer_enable || pdev->mcopy_mode || + pdev->tx_capture_enabled) { + if (ppdu_info->is_ampdu) + tlv_bitmap_expected = + dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap( + ppdu_info->tlv_bitmap); + } + + ppdu_desc = ppdu_info->ppdu_desc; + + if (!ppdu_desc) + return NULL; + + if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status != + HTT_PPDU_STATS_USER_STATUS_OK) { + tlv_bitmap_expected = tlv_bitmap_expected & 0xFF; + } + + /* + * for frame type DATA and BAR, we update stats based on MSDU, + * successful msdu and mpdu are populate from ACK BA STATUS TLV + * which comes out of order. successful mpdu also populated from + * COMPLTN COMMON TLV which comes in order. for every ppdu_info + * we store successful mpdu from both tlv and compare before delivering + * to make sure we received ACK BA STATUS TLV. For some self generated + * frame we won't get ack ba status tlv so no need to wait for + * ack ba status tlv. + */ + if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL && + ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) { + /* + * most of the time bar frame will have duplicate ack ba + * status tlv + */ + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR && + (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)) + return NULL; + /* + * For data frame, compltn common tlv should match ack ba status + * tlv and completion status. Reason we are checking first user + * for ofdma, completion seen at next MU BAR frm, for mimo + * only for first user completion will be immediate. + */ + if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA && + (ppdu_desc->user[0].completion_status == 0 && + (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))) + return NULL; + } + + /** + * Once all the TLVs for a given PPDU has been processed, + * return PPDU status to be delivered to higher layer. 
+ * tlv_bitmap_expected can't be available for different frame type. + * But SCHED CMD STATS TLV is the last TLV from the FW for a ppdu. + * apart from ACK BA TLV, FW sends other TLV in sequential order. + * flush tlv comes separate. + */ + if ((ppdu_info->tlv_bitmap != 0 && + (ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) || + (ppdu_info->tlv_bitmap & + (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) { + ppdu_info->done = 1; + return ppdu_info; + } + + return NULL; +} +#endif /* FEATURE_PERPKT_INFO */ + +/** + * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW + * @soc: DP SOC handle + * @pdev_id: pdev id + * @htt_t2h_msg: HTT message nbuf + * + * return:void + */ +#if defined(WDI_EVENT_ENABLE) +#ifdef FEATURE_PERPKT_INFO +static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, + uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) +{ + struct dp_pdev *pdev = soc->pdev_list[pdev_id]; + struct ppdu_info *ppdu_info = NULL; + bool free_buf = true; + + if (pdev_id >= MAX_PDEV_CNT) + return true; + + pdev = soc->pdev_list[pdev_id]; + if (!pdev) + return true; + + if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && + !pdev->mcopy_mode && !pdev->bpr_enable) + return free_buf; + + qdf_spin_lock_bh(&pdev->ppdu_stats_lock); + ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg); + + if (pdev->mgmtctrl_frm_info.mgmt_buf) { + if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv + (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) != + QDF_STATUS_SUCCESS) + free_buf = false; + } + + if (ppdu_info) + dp_ppdu_desc_deliver(pdev, ppdu_info); + + pdev->mgmtctrl_frm_info.mgmt_buf = NULL; + pdev->mgmtctrl_frm_info.mgmt_buf_len = 0; + pdev->mgmtctrl_frm_info.ppdu_id = 0; + + qdf_spin_unlock_bh(&pdev->ppdu_stats_lock); + + return free_buf; +} +#else +static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc, + uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg) +{ + return true; +} +#endif +#endif + +#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) +/* + * dp_ppdu_stats_ind_handler() - PPDU stats msg handler + * @htt_soc: HTT SOC handle + * @msg_word: Pointer to payload + * @htt_t2h_msg: HTT msg nbuf + * + * Return: True if buffer should be freed by caller. 
+ */ +static bool +dp_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + u_int8_t pdev_id; + u_int8_t target_pdev_id; + bool free_buf; + + target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word); + pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc, + target_pdev_id); + dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc, + htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL, + pdev_id); + + free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id, + htt_t2h_msg); + + return free_buf; +} +#endif + +/* + * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing + * @pdev: Datapath PDEV handle + * + * Return: QDF_STATUS_SUCCESS: Success + * QDF_STATUS_E_NOMEM: Error + */ +static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev) +{ + pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE); + + if (!pdev->ppdu_tlv_buf) { + QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail"); + return QDF_STATUS_E_NOMEM; + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_htt_ppdu_stats_detach() - detach stats resources + * @pdev: Datapath PDEV handle + * + * Return: void + */ +static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) +{ + struct ppdu_info *ppdu_info, *ppdu_info_next; + + TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list, + ppdu_info_list_elem, ppdu_info_next) { + if (!ppdu_info) + break; + TAILQ_REMOVE(&pdev->ppdu_info_list, + ppdu_info, ppdu_info_list_elem); + pdev->list_depth--; + qdf_assert_always(ppdu_info->nbuf); + qdf_nbuf_free(ppdu_info->nbuf); + qdf_mem_free(ppdu_info); + } + + TAILQ_FOREACH_SAFE(ppdu_info, &pdev->sched_comp_ppdu_list, + ppdu_info_list_elem, ppdu_info_next) { + if (!ppdu_info) + break; + TAILQ_REMOVE(&pdev->sched_comp_ppdu_list, + ppdu_info, ppdu_info_list_elem); + pdev->sched_comp_list_depth--; + qdf_assert_always(ppdu_info->nbuf); + qdf_nbuf_free(ppdu_info->nbuf); + qdf_mem_free(ppdu_info); + } + + if (pdev->ppdu_tlv_buf) + qdf_mem_free(pdev->ppdu_tlv_buf); +} + +void +dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev) +{ + struct cdp_pdev_mon_stats *rx_mon_stats; + uint32_t *stat_ring_ppdu_ids; + uint32_t *dest_ring_ppdu_ids; + int i, idx; + + rx_mon_stats = &pdev->rx_mon_stats; + + DP_PRINT_STATS("PDEV Rx Monitor Stats:\n"); + + DP_PRINT_STATS("status_ppdu_compl_cnt = %d", + rx_mon_stats->status_ppdu_compl); + DP_PRINT_STATS("status_ppdu_start_cnt = %d", + rx_mon_stats->status_ppdu_start); + DP_PRINT_STATS("status_ppdu_end_cnt = %d", + rx_mon_stats->status_ppdu_end); + DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d", + rx_mon_stats->status_ppdu_start_mis); + DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d", + rx_mon_stats->status_ppdu_end_mis); + DP_PRINT_STATS("status_ppdu_done_cnt = %d", + rx_mon_stats->status_ppdu_done); + DP_PRINT_STATS("dest_ppdu_done_cnt = %d", + rx_mon_stats->dest_ppdu_done); + DP_PRINT_STATS("dest_mpdu_done_cnt = %d", + rx_mon_stats->dest_mpdu_done); + DP_PRINT_STATS("tlv_tag_status_err_cnt = %u", + rx_mon_stats->tlv_tag_status_err); + DP_PRINT_STATS("mon status DMA not done WAR count= %u", + rx_mon_stats->status_buf_done_war); + DP_PRINT_STATS("dest_mpdu_drop_cnt = %d", + rx_mon_stats->dest_mpdu_drop); + DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d", + rx_mon_stats->dup_mon_linkdesc_cnt); + DP_PRINT_STATS("dup_mon_buf_cnt = %d", + rx_mon_stats->dup_mon_buf_cnt); + DP_PRINT_STATS("mon_rx_buf_reaped = %u", + rx_mon_stats->mon_rx_bufs_reaped_dest); + DP_PRINT_STATS("mon_rx_buf_replenished = %u", + rx_mon_stats->mon_rx_bufs_replenished_dest); + 
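dp_htt_ppdu_stats_detach() above drains the pending and sched-completion lists with TAILQ_FOREACH_SAFE, i.e. the next element is cached before the current one is removed and freed. A minimal standalone sketch of that idiom in the open-coded form the _SAFE macro expands to; all names here are hypothetical:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_ppdu_info {
	int ppdu_id;
	TAILQ_ENTRY(sketch_ppdu_info) elem;
};

TAILQ_HEAD(sketch_list, sketch_ppdu_info);

int main(void)
{
	struct sketch_list list = TAILQ_HEAD_INITIALIZER(list);
	struct sketch_ppdu_info *info, *next;
	int i;

	for (i = 0; i < 3; i++) {
		info = malloc(sizeof(*info));
		if (!info)
			return 1;
		info->ppdu_id = i;
		TAILQ_INSERT_TAIL(&list, info, elem);
	}

	/* Cache the next node before removing/freeing the current one so the
	 * walk survives the removal -- the same shape as the detach loops above.
	 */
	for (info = TAILQ_FIRST(&list); info; info = next) {
		next = TAILQ_NEXT(info, elem);
		TAILQ_REMOVE(&list, info, elem);
		printf("freeing ppdu_id %d\n", info->ppdu_id);
		free(info);
	}
	return 0;
}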
DP_PRINT_STATS("ppdu_id_mismatch = %u", + rx_mon_stats->ppdu_id_mismatch); + DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d", + rx_mon_stats->ppdu_id_match); + DP_PRINT_STATS("ppdus dropped frm status ring = %d", + rx_mon_stats->status_ppdu_drop); + DP_PRINT_STATS("ppdus dropped frm dest ring = %d", + rx_mon_stats->dest_ppdu_drop); + stat_ring_ppdu_ids = + (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); + dest_ring_ppdu_ids = + (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST); + + if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) + DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n"); + + qdf_spin_lock_bh(&pdev->mon_lock); + idx = rx_mon_stats->ppdu_id_hist_idx; + qdf_mem_copy(stat_ring_ppdu_ids, + rx_mon_stats->stat_ring_ppdu_id_hist, + sizeof(uint32_t) * MAX_PPDU_ID_HIST); + qdf_mem_copy(dest_ring_ppdu_ids, + rx_mon_stats->dest_ring_ppdu_id_hist, + sizeof(uint32_t) * MAX_PPDU_ID_HIST); + qdf_spin_unlock_bh(&pdev->mon_lock); + + DP_PRINT_STATS("PPDU Id history:"); + DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids"); + for (i = 0; i < MAX_PPDU_ID_HIST; i++) { + idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1); + DP_PRINT_STATS("%*u\t%*u", 16, + rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16, + rx_mon_stats->dest_ring_ppdu_id_hist[idx]); + } + qdf_mem_free(stat_ring_ppdu_ids); + qdf_mem_free(dest_ring_ppdu_ids); + DP_PRINT_STATS("mon_rx_dest_stuck = %d", + rx_mon_stats->mon_rx_dest_stuck); +} + +/* + *dp_set_bpr_enable() - API to enable/disable bpr feature + *@pdev_handle: DP_PDEV handle. + *@val: Provided value. + * + *Return: 0 for success. nonzero for failure. + */ +static QDF_STATUS +dp_set_bpr_enable(struct dp_pdev *pdev, int val) +{ + switch (val) { + case CDP_BPR_DISABLE: + pdev->bpr_enable = CDP_BPR_DISABLE; + if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en && + !pdev->tx_sniffer_enable && !pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); + } else if (pdev->enhanced_stats_en && + !pdev->tx_sniffer_enable && !pdev->mcopy_mode && + !pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_ENH_STATS, + pdev->pdev_id); + } + break; + case CDP_BPR_ENABLE: + pdev->bpr_enable = CDP_BPR_ENABLE; + if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && + !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR, + pdev->pdev_id); + } else if (pdev->enhanced_stats_en && + !pdev->tx_sniffer_enable && !pdev->mcopy_mode && + !pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR_ENH, + pdev->pdev_id); + } else if (pdev->pktlog_ppdu_stats) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR_PKTLOG, + pdev->pdev_id); + } + break; + default: + break; + } + + return QDF_STATUS_SUCCESS; +} + +#ifdef ATH_SUPPORT_NAC +/* + * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh + * @pdev_handle: device object + * @val: value to be set + * + * Return: void + */ +static int dp_set_filter_neigh_peers(struct dp_pdev *pdev, + bool val) +{ + /* Enable/Disable smart mesh filtering. This flag will be checked + * during rx processing to check if packets are from NAC clients. 
+	 */
+	pdev->filter_neighbour_peers = val;
+	return 0;
+}
+#endif /* ATH_SUPPORT_NAC */
+
+#ifdef WLAN_ATF_ENABLE
+static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
+{
+	if (!pdev) {
+		dp_cdp_err("Invalid pdev");
+		return;
+	}
+
+	pdev->dp_atf_stats_enable = value;
+}
+#endif
+
+/**
+ * dp_mon_set_bsscolor() - sets bsscolor for tx capture
+ * @pdev: Datapath PDEV handle
+ * @bsscolor: new bsscolor
+ */
+static void
+dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
+{
+	pdev->rx_mon_recv_status.bsscolor = bsscolor;
+}
+
+/**
+ * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
+ * @pdev_handle: Datapath PDEV handle
+ *
+ * Return: true if the ucast data filter flag is set
+ */
+static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+
+	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
+	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
+		return true;
+
+	return false;
+}
+
+/**
+ * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
+ * @pdev_handle: Datapath PDEV handle
+ *
+ * Return: true if the mcast data filter flag is set
+ */
+static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+
+	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
+	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
+		return true;
+
+	return false;
+}
+
+/**
+ * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
+ * @pdev_handle: Datapath PDEV handle
+ *
+ * Return: true if the non-data (mgmt and ctrl) filter flags are set
+ */
+static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+
+	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
+	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
+		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
+		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/**
+ * dp_vdev_set_monitor_mode_buf_rings() - set monitor mode buf rings
+ *
+ * Allocate SW descriptor pool, buffers and link descriptor memory and
+ * initialize the monitor related SRNGs.
+ *
+ * @pdev: DP pdev object
+ *
+ * Return: void
+ */
+static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
+{
+	uint32_t mac_id;
+	uint32_t mac_for_pdev;
+	struct dp_srng *mon_buf_ring;
+	uint32_t num_entries;
+	struct dp_soc *soc = pdev->soc;
+
+	/* If delayed monitor replenish is disabled, set up the full monitor
+	 * mode rings; otherwise only allocate the monitor ring buffers per
+	 * MAC and arm the low threshold interrupt.
+	 */
+	if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
+		dp_vdev_set_monitor_mode_rings(pdev, false);
+	} else {
+		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
+			mac_for_pdev =
+				dp_get_lmac_id_for_pdev_id(pdev->soc,
+							   mac_id,
+							   pdev->pdev_id);
+
+			dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
+							 FALSE);
+			mon_buf_ring =
+				&pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
+			/*
+			 * Configure a low interrupt threshold when monitor
+			 * mode is configured.
+			 */
+			if (mon_buf_ring->hal_srng) {
+				num_entries = mon_buf_ring->num_entries;
+				hal_set_low_threshold(mon_buf_ring->hal_srng,
+						      num_entries >> 3);
+				htt_srng_setup(pdev->soc->htt_handle,
+					       pdev->pdev_id,
+					       mon_buf_ring->hal_srng,
+					       RXDMA_MONITOR_BUF);
+			}
+		}
+	}
+}
+
+/*
+ * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
+ * @pdev: Datapath PDEV handle
+ * @event: which event's notifications are being subscribed to
+ * @enable: WDI event subscribe or not.
(True or False) + * + * Return: Success, NULL on failure + */ +#ifdef WDI_EVENT_ENABLE +int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, + bool enable) +{ + struct dp_soc *soc = NULL; + int max_mac_rings = wlan_cfg_get_num_mac_rings + (pdev->wlan_cfg_ctx); + uint8_t mac_id = 0; + + soc = pdev->soc; + dp_is_hw_dbs_enable(soc, &max_mac_rings); + + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + FL("Max_mac_rings %d "), + max_mac_rings); + + if (enable) { + switch (event) { + case WDI_EVENT_RX_DESC: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; + return 0; + } + + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL; + dp_mon_filter_setup_rx_pkt_log_full(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + dp_cdp_err("%pK: Pktlog full filters set failed", soc); + dp_mon_filter_reset_rx_pkt_log_full(pdev); + pdev->rx_pktlog_mode = + DP_RX_PKTLOG_DISABLED; + return 0; + } + + if (soc->reap_timer_init && + (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + } + break; + + case WDI_EVENT_LITE_RX: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; + return 0; + } + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE; + + /* + * Set the packet log lite mode filter. + */ + dp_mon_filter_setup_rx_pkt_log_lite(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + dp_cdp_err("%pK: Pktlog lite filters set failed", soc); + dp_mon_filter_reset_rx_pkt_log_lite(pdev); + pdev->rx_pktlog_mode = + DP_RX_PKTLOG_DISABLED; + return 0; + } + + if (soc->reap_timer_init && + (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + } + break; + + case WDI_EVENT_LITE_T2H: + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = dp_get_mac_id_for_pdev( + mac_id, pdev->pdev_id); + + pdev->pktlog_ppdu_stats = true; + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_TXLITE_STATS_BITMASK_CFG, + mac_for_pdev); + } + break; + + case WDI_EVENT_RX_CBF: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + dp_info("Monitor mode, CBF setting filters"); + pdev->rx_pktlog_cbf = true; + return 0; + } + if (!pdev->rx_pktlog_cbf) { + pdev->rx_pktlog_cbf = true; + pdev->monitor_configured = true; + dp_vdev_set_monitor_mode_buf_rings(pdev); + /* + * Set the packet log lite mode filter. 
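+			 * (Here it is the CBF variant of the filter,
+			 * dp_mon_filter_setup_rx_pkt_log_cbf(), and the
+			 * configuration is rolled back below if the filter
+			 * update fails.)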
+ */ + qdf_info("Non monitor mode: Enable destination ring"); + + dp_mon_filter_setup_rx_pkt_log_cbf(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + dp_err("Pktlog set CBF filters failed"); + dp_mon_filter_reset_rx_pktlog_cbf(pdev); + pdev->rx_pktlog_mode = + DP_RX_PKTLOG_DISABLED; + pdev->monitor_configured = false; + return 0; + } + + if (soc->reap_timer_init && + !dp_is_enable_reap_timer_non_pkt(pdev)) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + } + break; + + default: + /* Nothing needs to be done for other pktlog types */ + break; + } + } else { + switch (event) { + case WDI_EVENT_RX_DESC: + case WDI_EVENT_LITE_RX: + if (pdev->monitor_vdev) { + /* Nothing needs to be done if monitor mode is + * enabled + */ + pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; + return 0; + } + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { + pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED; + dp_mon_filter_reset_rx_pkt_log_full(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + dp_cdp_err("%pK: Pktlog filters reset failed", soc); + return 0; + } + + dp_mon_filter_reset_rx_pkt_log_lite(pdev); + if (dp_mon_filter_update(pdev) != + QDF_STATUS_SUCCESS) { + dp_cdp_err("%pK: Pktlog filters reset failed", soc); + return 0; + } + + if (soc->reap_timer_init && + (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_stop(&soc->mon_reap_timer); + } + break; + case WDI_EVENT_LITE_T2H: + /* + * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW + * passing value 0. Once these macros will define in htt + * header file will use proper macros + */ + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + pdev->pktlog_ppdu_stats = false; + if (!pdev->enhanced_stats_en && + !pdev->tx_sniffer_enable && + !pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, 0, + mac_for_pdev); + } else if (pdev->tx_sniffer_enable || + pdev->mcopy_mode) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_SNIFFER, + mac_for_pdev); + } else if (pdev->enhanced_stats_en) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_ENH_STATS, + mac_for_pdev); + } + } + + break; + case WDI_EVENT_RX_CBF: + pdev->rx_pktlog_cbf = false; + break; + + default: + /* Nothing needs to be done for other pktlog types */ + break; + } + } + return 0; +} +#endif + +/* MCL specific functions */ +#if defined(DP_CON_MON) +#ifndef REMOVE_PKT_LOG +/** + * dp_pktlogmod_exit() - API to cleanup pktlog info + * @pdev: Pdev handle + * + * Return: none + */ +static void dp_pktlogmod_exit(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + struct hif_opaque_softc *scn = soc->hif_handle; + + if (!scn) { + dp_err("Invalid hif(scn) handle"); + return; + } + + /* stop mon_reap_timer if it has been started */ + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED && + soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev))) + qdf_timer_sync_cancel(&soc->mon_reap_timer); + + pktlogmod_exit(scn); + pdev->pkt_log_init = false; +} +#endif +#endif /*DP_CON_MON*/ + +#ifdef WDI_EVENT_ENABLE +QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer) +{ + struct cdp_interface_peer_stats peer_stats_intf; + struct cdp_peer_stats *peer_stats = &peer->stats; + + if (!peer->vdev) + return QDF_STATUS_E_FAULT; + + qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf)); + if (peer_stats->rx.last_snr != peer_stats->rx.snr) + peer_stats_intf.rssi_changed = true; + + if ((peer_stats->rx.snr && peer_stats_intf.rssi_changed) || + 
(peer_stats->tx.tx_rate && + peer_stats->tx.tx_rate != peer_stats->tx.last_tx_rate)) { + qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + peer_stats_intf.vdev_id = peer->vdev->vdev_id; + peer_stats_intf.last_peer_tx_rate = peer_stats->tx.last_tx_rate; + peer_stats_intf.peer_tx_rate = peer_stats->tx.tx_rate; + peer_stats_intf.peer_rssi = peer_stats->rx.snr; + peer_stats_intf.tx_packet_count = peer_stats->tx.ucast.num; + peer_stats_intf.rx_packet_count = peer_stats->rx.to_stack.num; + peer_stats_intf.tx_byte_count = peer_stats->tx.tx_success.bytes; + peer_stats_intf.rx_byte_count = peer_stats->rx.to_stack.bytes; + peer_stats_intf.per = peer_stats->tx.last_per; + peer_stats_intf.ack_rssi = peer_stats->tx.last_ack_rssi; + peer_stats_intf.free_buff = INVALID_FREE_BUFF; + dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc, + (void *)&peer_stats_intf, 0, + WDI_NO_VAL, dp_pdev->pdev_id); + } + + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef FEATURE_NAC_RSSI +/** + * dp_rx_nac_filter(): Function to perform filtering of non-associated + * clients + * @pdev: DP pdev handle + * @rx_pkt_hdr: Rx packet Header + * + * return: dp_vdev* + */ +static +struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev, + uint8_t *rx_pkt_hdr) +{ + struct ieee80211_frame *wh; + struct dp_neighbour_peer *peer = NULL; + + wh = (struct ieee80211_frame *)rx_pkt_hdr; + + if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS) + return NULL; + + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], + wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) { + dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x", + pdev->soc, + peer->neighbour_peers_macaddr.raw[0], + peer->neighbour_peers_macaddr.raw[1], + peer->neighbour_peers_macaddr.raw[2], + peer->neighbour_peers_macaddr.raw[3], + peer->neighbour_peers_macaddr.raw[4], + peer->neighbour_peers_macaddr.raw[5]); + + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + + return pdev->monitor_vdev; + } + } + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + + return NULL; +} + +static QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev, + uint8_t *rx_pkt_hdr) +{ + struct dp_vdev *vdev = NULL; + + if (pdev->filter_neighbour_peers) { + /* Next Hop scenario not yet handle */ + vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr); + if (vdev) { + dp_rx_mon_deliver(pdev->soc, pdev->pdev_id, + pdev->invalid_peer_head_msdu, + pdev->invalid_peer_tail_msdu); + + pdev->invalid_peer_head_msdu = NULL; + pdev->invalid_peer_tail_msdu = NULL; + return QDF_STATUS_SUCCESS; + } + } + + return QDF_STATUS_E_FAILURE; +} +#endif + +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) +/* + * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients) + * address for smart mesh filtering + * @txrx_soc: cdp soc handle + * @vdev_id: id of virtual device object + * @cmd: Add/Del command + * @macaddr: nac client mac address + * + * Return: success/failure + */ +static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + uint32_t cmd, uint8_t *macaddr) +{ + struct dp_soc *soc = (struct dp_soc *)soc_hdl; + struct dp_pdev *pdev; + struct dp_neighbour_peer *peer = NULL; + struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, + DP_MOD_ID_CDP); + + if (!vdev || !macaddr) + goto fail0; + + pdev = vdev->pdev; + + if (!pdev) + goto fail0; + + /* Store address of NAC (neighbour 
peer) which will be checked + * against TA of received packets. + */ + if (cmd == DP_NAC_PARAM_ADD) { + peer = (struct dp_neighbour_peer *)qdf_mem_malloc( + sizeof(*peer)); + + if (!peer) { + dp_cdp_err("%pK: DP neighbour peer node memory allocation failed" + , soc); + goto fail0; + } + + qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0], + macaddr, QDF_MAC_ADDR_SIZE); + peer->vdev = vdev; + + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + + /* add this neighbour peer into the list */ + TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer, + neighbour_peer_list_elem); + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + + /* first neighbour */ + if (!pdev->neighbour_peers_added) { + QDF_STATUS status = QDF_STATUS_SUCCESS; + + pdev->neighbour_peers_added = true; + + dp_mon_filter_setup_smart_monitor(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + dp_cdp_err("%pK: smart mon filter setup failed", + soc); + dp_mon_filter_reset_smart_monitor(pdev); + pdev->neighbour_peers_added = false; + } + } + + } else if (cmd == DP_NAC_PARAM_DEL) { + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], + macaddr, QDF_MAC_ADDR_SIZE)) { + /* delete this peer from the list */ + TAILQ_REMOVE(&pdev->neighbour_peers_list, + peer, neighbour_peer_list_elem); + qdf_mem_free(peer); + break; + } + } + /* last neighbour deleted */ + if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) { + QDF_STATUS status = QDF_STATUS_SUCCESS; + + dp_mon_filter_reset_smart_monitor(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + dp_cdp_err("%pK: smart mon filter clear failed", + soc); + } + pdev->neighbour_peers_added = false; + } + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + } + dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); + return 1; + +fail0: + if (vdev) + dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); + return 0; +} +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + +#ifdef ATH_SUPPORT_NAC_RSSI +/** + * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC + * @soc_hdl: DP soc handle + * @vdev_id: id of DP vdev handle + * @mac_addr: neighbour mac + * @rssi: rssi value + * + * Return: 0 for success. nonzero for failure. 
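+ *
+ * Illustrative lookup through the ctrl op registered by
+ * dp_mon_cdp_ops_register() below (a sketch only):
+ *
+ *	uint8_t rssi = 0;
+ *
+ *	if (ops->ctrl_ops->txrx_vdev_get_neighbour_rssi(soc_hdl, vdev_id,
+ *							mac, &rssi) ==
+ *	    QDF_STATUS_SUCCESS)
+ *		report the RSSI for the NAC client;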
+ */ +static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl, + uint8_t vdev_id, + char *mac_addr, + uint8_t *rssi) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, + DP_MOD_ID_CDP); + struct dp_pdev *pdev; + struct dp_neighbour_peer *peer = NULL; + QDF_STATUS status = QDF_STATUS_E_FAILURE; + + if (!vdev) + return status; + + pdev = vdev->pdev; + *rssi = 0; + qdf_spin_lock_bh(&pdev->neighbour_peer_mutex); + TAILQ_FOREACH(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem) { + if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0], + mac_addr, QDF_MAC_ADDR_SIZE) == 0) { + *rssi = peer->rssi; + status = QDF_STATUS_SUCCESS; + break; + } + } + qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex); + dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); + return status; +} + +static QDF_STATUS +dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc, + uint8_t vdev_id, + enum cdp_nac_param_cmd cmd, char *bssid, + char *client_macaddr, + uint8_t chan_num) +{ + struct dp_soc *soc = (struct dp_soc *)cdp_soc; + struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id, + DP_MOD_ID_CDP); + struct dp_pdev *pdev; + + if (!vdev) + return QDF_STATUS_E_FAILURE; + + pdev = (struct dp_pdev *)vdev->pdev; + pdev->nac_rssi_filtering = 1; + /* Store address of NAC (neighbour peer) which will be checked + * against TA of received packets. + */ + + if (cmd == CDP_NAC_PARAM_ADD) { + dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, + DP_NAC_PARAM_ADD, + (uint8_t *)client_macaddr); + } else if (cmd == CDP_NAC_PARAM_DEL) { + dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id, + DP_NAC_PARAM_DEL, + (uint8_t *)client_macaddr); + } + + if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi) + soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi + (soc->ctrl_psoc, pdev->pdev_id, + vdev->vdev_id, cmd, bssid, client_macaddr); + + dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP); + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) +/* + * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @enable: Enable/Disable CFR + * @filter_val: Flag to select Filter for monitor mode + */ +static void dp_cfr_filter(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, + bool enable, + struct cdp_monitor_filter *filter_val) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = NULL; + struct htt_rx_ring_tlv_filter htt_tlv_filter = {0}; + int max_mac_rings; + uint8_t mac_id = 0; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev is NULL"); + return; + } + + if (pdev->monitor_vdev) { + dp_info("No action is needed since monitor mode is enabled\n"); + return; + } + soc = pdev->soc; + pdev->cfr_rcc_mode = false; + max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx); + dp_is_hw_dbs_enable(soc, &max_mac_rings); + + dp_debug("Max_mac_rings %d", max_mac_rings); + dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode); + + if (enable) { + pdev->cfr_rcc_mode = true; + + htt_tlv_filter.ppdu_start = 1; + htt_tlv_filter.ppdu_end = 1; + htt_tlv_filter.ppdu_end_user_stats = 1; + htt_tlv_filter.ppdu_end_user_stats_ext = 1; + htt_tlv_filter.ppdu_end_status_done = 1; + htt_tlv_filter.mpdu_start = 1; + htt_tlv_filter.offset_valid = false; + + htt_tlv_filter.enable_fp = + (filter_val->mode & MON_FILTER_PASS) ? 
1 : 0; + htt_tlv_filter.enable_md = 0; + htt_tlv_filter.enable_mo = + (filter_val->mode & MON_FILTER_OTHER) ? 1 : 0; + htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt; + htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl; + htt_tlv_filter.fp_data_filter = filter_val->fp_data; + htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt; + htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl; + htt_tlv_filter.mo_data_filter = filter_val->mo_data; + } + + for (mac_id = 0; mac_id < max_mac_rings; mac_id++) { + int mac_for_pdev = + dp_get_mac_id_for_pdev(mac_id, + pdev->pdev_id); + + htt_h2t_rx_ring_cfg(soc->htt_handle, + mac_for_pdev, + soc->rxdma_mon_status_ring[mac_id] + .hal_srng, + RXDMA_MONITOR_STATUS, + RX_MON_STATUS_BUF_SIZE, + &htt_tlv_filter); + } +} + +/* + * dp_enable_mon_reap_timer() - enable/disable reap timer + * @soc_hdl: Datapath soc handle + * @pdev_id: id of objmgr pdev + * @enable: Enable/Disable reap timer of monitor status ring + * + * Return: none + */ +static void +dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, + bool enable) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *pdev = NULL; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + if (!pdev) { + dp_err("pdev is NULL"); + return; + } + + pdev->enable_reap_timer_non_pkt = enable; + if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) { + dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode); + return; + } + + if (!soc->reap_timer_init) { + dp_err("reap timer not init"); + return; + } + + if (enable) + qdf_timer_mod(&soc->mon_reap_timer, + DP_INTR_POLL_TIMER_MS); + else + qdf_timer_sync_cancel(&soc->mon_reap_timer); +} +#endif + +#if defined(DP_CON_MON) +#ifndef REMOVE_PKT_LOG +/** + * dp_pkt_log_init() - API to initialize packet log + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @scn: HIF context + * + * Return: none + */ +void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn) +{ + struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl); + struct dp_pdev *handle = + dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id); + + if (!handle) { + dp_err("pdev handle is NULL"); + return; + } + + if (handle->pkt_log_init) { + mon_init_err("%pK: Packet log not initialized", soc); + return; + } + + pktlog_sethandle(&handle->pl_dev, scn); + pktlog_set_pdev_id(handle->pl_dev, pdev_id); + pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION); + + if (pktlogmod_init(scn)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "%s: pktlogmod_init failed", __func__); + handle->pkt_log_init = false; + } else { + handle->pkt_log_init = true; + } +} + +/** + * dp_pkt_log_con_service() - connect packet log service + * @soc_hdl: Datapath soc handle + * @pdev_id: id of data path pdev handle + * @scn: device context + * + * Return: none + */ +static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, void *scn) +{ + dp_pkt_log_init(soc_hdl, pdev_id, scn); + pktlog_htc_attach(); +} +#else +static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl, + uint8_t pdev_id, void *scn) +{ +} +#endif +#endif + +/* + * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients) + * @pdev: device object + * + * Return: void + */ +static void dp_neighbour_peers_detach(struct dp_pdev *pdev) +{ + struct dp_neighbour_peer *peer = NULL; + struct dp_neighbour_peer *temp_peer = NULL; + + TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list, + neighbour_peer_list_elem, temp_peer) { + /* delete this peer from the list */ + 
TAILQ_REMOVE(&pdev->neighbour_peers_list, + peer, neighbour_peer_list_elem); + qdf_mem_free(peer); + } + + qdf_spinlock_destroy(&pdev->neighbour_peer_mutex); +} + +/* + * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer + * modes are enabled or not. + * @dp_pdev: dp pdev handle. + * + * Return: bool + */ +static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev) +{ + if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable && + !pdev->mcopy_mode) + return true; + else + return false; +} + +#ifdef FEATURE_PERPKT_INFO +/* + * dp_enable_enhanced_stats()- API to enable enhanced statistcs + * @soc_handle: DP_SOC handle + * @pdev_id: id of DP_PDEV handle + * + * Return: QDF_STATUS + */ +static QDF_STATUS +dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) +{ + struct dp_pdev *pdev = NULL; + QDF_STATUS status = QDF_STATUS_SUCCESS; + + pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + if (pdev->enhanced_stats_en == 0) + dp_cal_client_timer_start(pdev->cal_client_ctx); + + pdev->enhanced_stats_en = 1; + + dp_mon_filter_setup_enhanced_stats(pdev); + status = dp_mon_filter_update(pdev); + if (status != QDF_STATUS_SUCCESS) { + dp_cdp_err("%pK: Failed to set enhanced mode filters", soc); + dp_mon_filter_reset_enhanced_stats(pdev); + dp_cal_client_timer_stop(pdev->cal_client_ctx); + pdev->enhanced_stats_en = 0; + return QDF_STATUS_E_FAILURE; + } + + if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, + pdev->pdev_id); + } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR_ENH, + pdev->pdev_id); + } + + return QDF_STATUS_SUCCESS; +} + +/* + * dp_disable_enhanced_stats()- API to disable enhanced statistcs + * + * @param soc - the soc handle + * @param pdev_id - pdev_id of pdev + * @return - QDF_STATUS + */ +static QDF_STATUS +dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id) +{ + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + if (pdev->enhanced_stats_en == 1) + dp_cal_client_timer_stop(pdev->cal_client_ctx); + + pdev->enhanced_stats_en = 0; + + if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id); + } else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) { + dp_h2t_cfg_stats_msg_send(pdev, + DP_PPDU_STATS_CFG_BPR, + pdev->pdev_id); + } + + dp_mon_filter_reset_enhanced_stats(pdev); + if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to reset enhanced mode filters")); + } + + return QDF_STATUS_SUCCESS; +} +#endif /* FEATURE_PERPKT_INFO */ + +/** + * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering + * for pktlog + * @soc: cdp_soc handle + * @pdev_id: id of dp pdev handle + * @mac_addr: Peer mac address + * @enb_dsb: Enable or disable peer based filtering + * + * Return: QDF_STATUS + */ +static int +dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id, + uint8_t *mac_addr, uint8_t enb_dsb) +{ + struct dp_peer *peer; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + + if (!pdev) + return QDF_STATUS_E_FAILURE; + + peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr, + 0, DP_VDEV_ALL, DP_MOD_ID_CDP); 
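+	/* dp_peer_find_hash_find() takes a DP_MOD_ID_CDP reference on the
+	 * peer; it is dropped with dp_peer_unref_delete() before returning.
+	 */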
+ + if (!peer) { + dp_err("Invalid Peer"); + return QDF_STATUS_E_FAILURE; + } + + peer->peer_based_pktlog_filter = enb_dsb; + pdev->dp_peer_based_pktlog = enb_dsb; + + dp_peer_unref_delete(peer, DP_MOD_ID_CDP); + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer + * @soc: DP_SOC handle + * @pdev_id: id of DP_PDEV handle + * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode + * @is_tx_pkt_cap_enable: enable/disable/delete/print + * Tx packet capture in monitor mode + * @peer_mac: MAC address for which the above need to be enabled/disabled + * + * Return: Success if Rx & Tx capture is enabled for peer, false otherwise + */ +QDF_STATUS +dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc, + uint8_t pdev_id, + bool is_rx_pkt_cap_enable, + uint8_t is_tx_pkt_cap_enable, + uint8_t *peer_mac) +{ + struct dp_peer *peer; + QDF_STATUS status; + struct dp_pdev *pdev = + dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc, + pdev_id); + if (!pdev) + return QDF_STATUS_E_FAILURE; + + peer = dp_peer_find_hash_find((struct dp_soc *)soc, + peer_mac, 0, DP_VDEV_ALL, + DP_MOD_ID_CDP); + if (!peer) + return QDF_STATUS_E_FAILURE; + + /* we need to set tx pkt capture for non associated peer */ + status = dp_peer_set_tx_capture_enabled(pdev, peer, + is_tx_pkt_cap_enable, + peer_mac); + + status = dp_peer_set_rx_capture_enabled(pdev, peer, + is_rx_pkt_cap_enable, + peer_mac); + dp_peer_unref_delete(peer, DP_MOD_ID_CDP); + + return status; +} + +/** + * dp_vdev_set_monitor_mode_rings () - set monitor mode rings + * + * Allocate SW descriptor pool, buffers, link descriptor memory + * Initialize monitor related SRNGs + * + * @pdev: DP pdev object + * + * Return: QDF_STATUS + */ +QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev, + uint8_t delayed_replenish) +{ + struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; + uint32_t mac_id; + uint32_t mac_for_pdev; + struct dp_soc *soc = pdev->soc; + QDF_STATUS status = QDF_STATUS_SUCCESS; + struct dp_srng *mon_buf_ring; + uint32_t num_entries; + + pdev_cfg_ctx = pdev->wlan_cfg_ctx; + + /* If monitor rings are aleady initilized, return from here */ + if (pdev->pdev_mon_init) + return QDF_STATUS_SUCCESS; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, + pdev->pdev_id); + + /* Allocate sw rx descriptor pool for mon RxDMA buffer ring */ + status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n", + __func__); + goto fail0; + } + + dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev); + + /* If monitor buffers are already allocated, + * do not allocate. + */ + status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, + delayed_replenish); + + mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev]; + /* + * Configure low interrupt threshld when monitor mode is + * configured. 
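+		 * The threshold is armed at one eighth of the ring size
+		 * (num_entries >> 3), the same watermark used by the
+		 * delayed-replenish path in
+		 * dp_vdev_set_monitor_mode_buf_rings().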
+ */ + if (mon_buf_ring->hal_srng) { + num_entries = mon_buf_ring->num_entries; + hal_set_low_threshold(mon_buf_ring->hal_srng, + num_entries >> 3); + htt_srng_setup(pdev->soc->htt_handle, + pdev->pdev_id, + mon_buf_ring->hal_srng, + RXDMA_MONITOR_BUF); + } + + /* Allocate link descriptors for the mon link descriptor ring */ + status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed", + __func__); + goto fail0; + } + dp_link_desc_ring_replenish(soc, mac_for_pdev); + + htt_srng_setup(soc->htt_handle, pdev->pdev_id, + soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng, + RXDMA_MONITOR_DESC); + htt_srng_setup(soc->htt_handle, pdev->pdev_id, + soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng, + RXDMA_MONITOR_DST); + } + pdev->pdev_mon_init = 1; + + return QDF_STATUS_SUCCESS; + +fail0: + return QDF_STATUS_E_FAILURE; +} + QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc) { int target_type; @@ -704,7 +4661,6 @@ QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev) mon_init_err("pdev is NULL"); return QDF_STATUS_E_FAILURE; } - mon_pdev = pdev->monitor_pdev; dp_rx_pdev_mon_desc_pool_free(pdev); dp_mon_rings_free(pdev); @@ -797,6 +4753,65 @@ static struct dp_mon_ops monitor_ops = { .mon_pdev_init = dp_mon_pdev_init, .mon_pdev_deinit = dp_mon_pdev_deinit, .mon_config_debug_sniffer = dp_config_debug_sniffer, + .mon_flush_rings = dp_flush_monitor_rings, +#if !defined(DISABLE_MON_CONFIG) + .mon_htt_srng_setup = dp_mon_htt_srng_setup, +#endif +#if defined(DP_CON_MON) + .mon_service_rings = dp_service_mon_rings, +#endif +#ifndef DISABLE_MON_CONFIG + .mon_process = dp_mon_process, +#endif +#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC) + .mon_drop_packets_for_mac = dp_mon_drop_packets_for_mac, +#endif + .mon_peer_tx_init = dp_peer_tx_init, + .mon_peer_tx_cleanup = dp_peer_tx_cleanup, +#ifdef WLAN_TX_PKT_CAPTURE_ENH + .mon_peer_tid_peer_id_update = dp_peer_tid_peer_id_update, + .mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach, + .mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach, + .mon_tx_capture_debugfs_init = dp_tx_capture_debugfs_init, + .mon_tx_add_to_comp_queue = dp_tx_add_to_comp_queue, + .mon_peer_tx_capture_filter_check = dp_peer_tx_capture_filter_check, +#endif +#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) + .mon_ppdu_stats_ind_handler = dp_ppdu_stats_ind_handler, +#endif + .mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach, + .mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach, + .mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats, +#ifdef WLAN_TX_PKT_CAPTURE_ENH + .mon_print_pdev_tx_capture_stats = dp_print_pdev_tx_capture_stats, + .mon_config_enh_tx_capture = dp_config_enh_tx_capture, +#endif +#ifdef WLAN_RX_PKT_CAPTURE_ENH + .mon_config_enh_rx_capture = dp_config_enh_rx_capture, +#endif + .mon_set_bpr_enable = dp_set_bpr_enable, +#ifdef ATH_SUPPORT_NAC + .mon_set_filter_neigh_peers = dp_set_filter_neigh_peers, +#endif +#ifdef WLAN_ATF_ENABLE + .mon_set_atf_stats_enable = dp_set_atf_stats_enable, +#endif + .mon_set_bsscolor = dp_mon_set_bsscolor, + .mon_pdev_get_filter_ucast_data = dp_pdev_get_filter_ucast_data, + .mon_pdev_get_filter_mcast_data = dp_pdev_get_filter_mcast_data, + .mon_pdev_get_filter_non_data = dp_pdev_get_filter_non_data, +#ifdef WDI_EVENT_ENABLE + .mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3, +#endif +#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG) + .mon_pktlogmod_exit = dp_pktlogmod_exit, +#endif + 
.mon_vdev_set_monitor_mode_buf_rings = + dp_vdev_set_monitor_mode_buf_rings, + .mon_neighbour_peers_detach = dp_neighbour_peers_detach, +#ifdef FEATURE_NAC_RSSI + .mon_filter_neighbour_peer = dp_filter_neighbour_peer, +#endif }; static struct cdp_mon_ops dp_ops_mon = { @@ -822,7 +4837,77 @@ static inline void dp_mon_cdp_ops_register(struct dp_soc *soc) } ops->mon_ops = &dp_ops_mon; +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) + ops->cfr_ops->txrx_cfr_filter = dp_cfr_filter; + ops->cfr_ops->txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer; +#endif ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode; + ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = + dp_get_mon_vdev_from_pdev_wifi3; +#ifdef DP_PEER_EXTENDED_API + ops->misc_ops->pkt_log_init = dp_pkt_log_init; + ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service; +#endif +#ifdef ATH_SUPPORT_NAC_RSSI + ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi; + ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = + dp_vdev_get_neighbour_rssi; +#endif +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) + ops->ctrl_ops->txrx_update_filter_neighbour_peers = + dp_update_filter_neighbour_peers; +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + ops->ctrl_ops->enable_peer_based_pktlog = + dp_enable_peer_based_pktlog; +#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) + ops->ctrl_ops->txrx_update_peer_pkt_capture_params = + dp_peer_update_pkt_capture_params; +#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ +#ifdef FEATURE_PERPKT_INFO + ops->host_stats_ops->txrx_enable_enhanced_stats = + dp_enable_enhanced_stats; + ops->host_stats_ops->txrx_disable_enhanced_stats = + dp_disable_enhanced_stats; +#endif /* FEATURE_PERPKT_INFO */ + return; +} + +static inline void dp_mon_cdp_ops_deregister(struct dp_soc *soc) +{ + struct cdp_ops *ops = soc->cdp_soc.ops; + + if (!ops) { + mon_init_err("cdp_ops is NULL"); + return; + } + + ops->mon_ops = NULL; +#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE) + ops->cfr_ops->txrx_cfr_filter = NULL; + ops->cfr_ops->txrx_enable_mon_reap_timer = NULL; +#endif + ops->cmn_drv_ops->txrx_set_monitor_mode = NULL; + ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL; +#ifdef DP_PEER_EXTENDED_API + ops->misc_ops->pkt_log_init = NULL; + ops->misc_ops->pkt_log_con_service = NULL; +#endif +#ifdef ATH_SUPPORT_NAC_RSSI + ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL; + ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL; +#endif +#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC) + ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL; +#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */ + ops->ctrl_ops->enable_peer_based_pktlog = NULL; +#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH) + ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL; +#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */ +#ifdef FEATURE_PERPKT_INFO + ops->host_stats_ops->txrx_enable_enhanced_stats = NULL; + ops->host_stats_ops->txrx_disable_enhanced_stats = NULL; +#endif /* FEATURE_PERPKT_INFO */ + return; } QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc) diff --git a/dp/wifi3.0/monitor/dp_mon.h b/dp/wifi3.0/monitor/dp_mon.h index 9a08c6cd7a..6934d528fa 100644 --- a/dp/wifi3.0/monitor/dp_mon.h +++ b/dp/wifi3.0/monitor/dp_mon.h @@ -13,6 +13,7 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#define mon_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params) struct dp_mon_ops { QDF_STATUS (*mon_soc_cfg_init)(struct dp_soc *soc); QDF_STATUS (*mon_pdev_attach)(struct dp_pdev *pdev); @@ -20,6 +21,85 @@ struct dp_mon_ops { QDF_STATUS (*mon_pdev_init)(struct dp_pdev *pdev); QDF_STATUS (*mon_pdev_deinit)(struct dp_pdev *pdev); QDF_STATUS (*mon_config_debug_sniffer)(struct dp_pdev *pdev, int val); + void (*mon_flush_rings)(struct dp_soc *soc); +#if !defined(DISABLE_MON_CONFIG) + QDF_STATUS (*mon_htt_srng_setup)(struct dp_soc *soc, + struct dp_pdev *pdev, + int mac_id, + int mac_for_pdev); +#endif +#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC) + uint32_t (*mon_drop_packets_for_mac)(struct dp_pdev *pdev, + uint32_t mac_id, + uint32_t quota); +#endif +#if defined(DP_CON_MON) + void (*mon_service_rings)(struct dp_soc *soc, uint32_t quota); +#endif +#ifndef DISABLE_MON_CONFIG + uint32_t (*mon_process)(struct dp_soc *soc, + struct dp_intr *int_ctx, + uint32_t mac_id, + uint32_t quota); +#endif + void (*mon_peer_tx_init)(struct dp_pdev *pdev, struct dp_peer *peer); + void (*mon_peer_tx_cleanup)(struct dp_vdev *vdev, + struct dp_peer *peer); +#ifdef WLAN_TX_PKT_CAPTURE_ENH + void (*mon_peer_tid_peer_id_update)(struct dp_peer *peer, + uint16_t peer_id); + void (*mon_tx_ppdu_stats_attach)(struct dp_pdev *pdev); + void (*mon_tx_ppdu_stats_detach)(struct dp_pdev *pdev); + QDF_STATUS (*mon_tx_capture_debugfs_init)(struct dp_pdev *pdev); + void (*mon_peer_tx_capture_filter_check)(struct dp_pdev *pdev, + struct dp_peer *peer); + QDF_STATUS (*mon_tx_add_to_comp_queue)(struct dp_soc *soc, + struct dp_tx_desc_s *desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer); +#endif +#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) + bool (*mon_ppdu_stats_ind_handler)(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg); +#endif + QDF_STATUS (*mon_htt_ppdu_stats_attach)(struct dp_pdev *pdev); + void (*mon_htt_ppdu_stats_detach)(struct dp_pdev *pdev); + void (*mon_print_pdev_rx_mon_stats)(struct dp_pdev *pdev); + +#ifdef WLAN_TX_PKT_CAPTURE_ENH + void (*mon_print_pdev_tx_capture_stats)(struct dp_pdev *pdev); + QDF_STATUS (*mon_config_enh_tx_capture)(struct dp_pdev *pdev, + uint8_t val); +#endif +#ifdef WLAN_RX_PKT_CAPTURE_ENH + QDF_STATUS (*mon_config_enh_rx_capture)(struct dp_pdev *pdev, + uint8_t val); +#endif + QDF_STATUS (*mon_set_bpr_enable)(struct dp_pdev *pdev, int val); +#ifdef ATH_SUPPORT_NAC + int (*mon_set_filter_neigh_peers)(struct dp_pdev *pdev, bool val); +#endif +#ifdef WLAN_ATF_ENABLE + void (*mon_set_atf_stats_enable)(struct dp_pdev *pdev, bool value); +#endif + void (*mon_set_bsscolor)(struct dp_pdev *pdev, uint8_t bsscolor); + bool (*mon_pdev_get_filter_ucast_data)(struct cdp_pdev *pdev_handle); + bool (*mon_pdev_get_filter_non_data)(struct cdp_pdev *pdev_handle); + bool (*mon_pdev_get_filter_mcast_data)(struct cdp_pdev *pdev_handle); +#ifdef WDI_EVENT_ENABLE + int (*mon_set_pktlog_wifi3)(struct dp_pdev *pdev, uint32_t event, + bool enable); +#endif +#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG) + void (*mon_pktlogmod_exit)(struct dp_pdev *pdev); +#endif + void (*mon_vdev_set_monitor_mode_buf_rings)(struct dp_pdev *pdev); + void (*mon_neighbour_peers_detach)(struct dp_pdev *pdev); +#ifdef FEATURE_NAC_RSSI + QDF_STATUS (*mon_filter_neighbour_peer)(struct dp_pdev *pdev, + uint8_t *rx_pkt_hdr); +#endif }; struct dp_mon_soc { @@ -38,6 +118,212 @@ struct dp_mon_vdev { struct dp_mon_peer { }; +#ifdef FEATURE_PERPKT_INFO 
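+/**
+ * dp_deliver_mgmt_frm() - deliver a management frame
+ * @pdev: Datapath PDEV handle
+ * @nbuf: management frame buffer
+ *
+ * Implemented only when FEATURE_PERPKT_INFO is enabled; the inline stub
+ * below is a no-op otherwise.
+ */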
+void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf); +#else +static inline +void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf) +{ +} +#endif + +#ifndef WLAN_TX_PKT_CAPTURE_ENH +/** + * dp_peer_tid_queue_init() – Initialize ppdu stats queue per TID + * @peer: Datapath peer + * + */ +static inline void dp_peer_tid_queue_init(struct dp_peer *peer) +{ +} + +/** + * dp_peer_tid_queue_cleanup() – remove ppdu stats queue per TID + * @peer: Datapath peer + * + */ +static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer) +{ +} + +/** + * dp_peer_update_80211_hdr() – dp peer update 80211 hdr + * @vdev: Datapath vdev + * @peer: Datapath peer + * + */ +static inline void +dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer) +{ +} + +/** + * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture + * @pdev: DP PDEV + * + * Return: none + */ +static inline void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev) +{ +} + +/** + * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture + * @pdev: DP PDEV + * + * Return: none + */ +static inline void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev) +{ +} + +/** + * dp_tx_add_to_comp_queue() - add completion msdu to queue + * @soc: DP Soc handle + * @tx_desc: software Tx descriptor + * @ts : Tx completion status from HAL/HTT descriptor + * @peer: DP peer + * + * Return: none + */ +static inline +QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc, + struct dp_tx_desc_s *desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer) +{ + return QDF_STATUS_E_FAILURE; +} + +/* + * dp_peer_tx_capture_filter_check: check filter is enable for the filter + * and update tx_cap_enabled flag + * @pdev: DP PDEV handle + * @peer: DP PEER handle + * + * return: void + */ +static inline +void dp_peer_tx_capture_filter_check(struct dp_pdev *pdev, + struct dp_peer *peer) +{ +} + +/* + * dp_tx_capture_debugfs_init: tx capture debugfs init + * @pdev: DP PDEV handle + * + * return: QDF_STATUS + */ +static inline +QDF_STATUS dp_tx_capture_debugfs_init(struct dp_pdev *pdev) +{ + return QDF_STATUS_E_FAILURE; +} +#endif + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +extern uint8_t +dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX]; +#endif + +#ifdef WDI_EVENT_ENABLE +void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn); +#else +static inline void +dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn) +{ +} +#endif + +#ifdef WDI_EVENT_ENABLE +QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, struct dp_peer *peer); +#else +static inline QDF_STATUS dp_peer_stats_notify(struct dp_pdev *pdev, + struct dp_peer *peer) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG) +static void dp_pktlogmod_exit(struct dp_pdev *handle); +#else +static void dp_pktlogmod_exit(struct dp_pdev *handle) { } +#endif + +#ifndef WLAN_TX_PKT_CAPTURE_ENH +/** + * dp_tx_ppdu_stats_process - Deferred PPDU stats handler + * @context: Opaque work context (PDEV) + * + * Return: none + */ +static inline void dp_tx_ppdu_stats_process(void *context) +{ +} + +/* + * dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type + * pdev: DP pdev handle + * htt_frame_type: htt frame type received from fw + * + * return: void + */ +static inline +void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev, + uint32_t htt_frame_type) +{ +} + +/* + * dp_tx_cature_stats: print tx capture stats + * @pdev: DP PDEV handle + * + * return: void + 
*/ +static inline +void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev) +{ +} +#endif + +/* + * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is + * enabled by non-pkt log or not + * @pdev: point to dp pdev + * + * Return: true if mon reap timer is enabled by non-pkt log + */ +static inline bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) +{ + if (!pdev) { + dp_err("null pdev"); + return false; + } + + return pdev->enable_reap_timer_non_pkt; +} + +#ifdef FEATURE_NAC_RSSI +static inline QDF_STATUS monitor_drop_inv_peer_pkts(struct dp_vdev *vdev, + struct ieee80211_frame *wh) +{ + struct dp_pdev *pdev = vdev->pdev; + struct dp_soc *soc = pdev->soc; + + if (!soc->hw_nac_monitor_support && + pdev->filter_neighbour_peers && + vdev->opmode == wlan_op_mode_sta) { + mon_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm", + soc, wh->i_addr1); + return QDF_STATUS_SUCCESS; + } + + return QDF_STATUS_E_FAILURE; +} +#endif + static inline QDF_STATUS monitor_pdev_attach(struct dp_pdev *pdev) { struct dp_mon_ops *monitor_ops; @@ -166,3 +452,772 @@ static inline QDF_STATUS monitor_config_debug_sniffer(struct dp_pdev *pdev, return monitor_ops->mon_config_debug_sniffer(pdev, val); } + +static inline void monitor_flush_rings(struct dp_soc *soc) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_flush_rings) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_flush_rings(soc); +} + +#if !defined(DISABLE_MON_CONFIG) +static inline QDF_STATUS monitor_htt_srng_setup(struct dp_soc *soc, + struct dp_pdev *pdev, + int mac_id, + int mac_for_pdev) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return QDF_STATUS_SUCCESS; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_htt_srng_setup) { + qdf_err("callback not registered"); + return QDF_STATUS_E_FAILURE; + } + + return monitor_ops->mon_htt_srng_setup(soc, pdev, mac_id, + mac_for_pdev); +} +#else +static inline QDF_STATUS monitor_htt_srng_setup(struct dp_soc *soc, + struct dp_pdev *pdev, + int mac_id, + int mac_for_pdev) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#if defined(DP_CON_MON) +static inline void monitor_service_mon_rings(struct dp_soc *soc, uint32_t quota) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_service_rings) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_service_rings(soc, quota); +} +#endif + +#ifndef DISABLE_MON_CONFIG +static inline +uint32_t monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, + uint32_t mac_id, uint32_t quota) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return 0; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_process) { + qdf_err("callback not registered"); + return 0; + } + + return monitor_ops->mon_process(soc, int_ctx, mac_id, quota); +} +#else +static inline +uint32_t monitor_process(struct dp_soc *soc, struct dp_intr *int_ctx, + uint32_t mac_id, uint32_t quota) +{ + return 0; +} +#endif + +#if 
!defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC) +static inline +uint32_t monitor_drop_packets_for_mac(struct dp_pdev *pdev, + uint32_t mac_id, uint32_t quota) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return 0; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_drop_packets_for_mac) { + qdf_err("callback not registered"); + return 0; + } + + return monitor_ops->mon_drop_packets_for_mac(pdev, + mac_id, quota); +} +#else +static inline +uint32_t monitor_drop_packets_for_mac(struct dp_pdev *pdev, + uint32_t mac_id, uint32_t quota) +{ + return 0; +} +#endif + +static inline void monitor_peer_tx_init(struct dp_pdev *pdev, + struct dp_peer *peer) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_peer_tx_init) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_peer_tx_init(pdev, peer); +} + +static inline void monitor_peer_tx_cleanup(struct dp_vdev *vdev, + struct dp_peer *peer) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = vdev->pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_peer_tx_cleanup) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_peer_tx_cleanup(vdev, peer); +} + +#ifdef WLAN_TX_PKT_CAPTURE_ENH +static inline +void monitor_peer_tid_peer_id_update(struct dp_soc *soc, + struct dp_peer *peer, + uint16_t peer_id) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_peer_tid_peer_id_update) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_peer_tid_peer_id_update(peer, peer_id); +} + +static inline void monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_tx_ppdu_stats_attach) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_tx_ppdu_stats_attach(pdev); +} + +static inline void monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_tx_ppdu_stats_detach) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_tx_ppdu_stats_detach(pdev); +} + +static inline QDF_STATUS monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return QDF_STATUS_E_FAILURE; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_tx_capture_debugfs_init) { + qdf_err("callback not registered"); + return QDF_STATUS_E_FAILURE; + } + + return monitor_ops->mon_tx_capture_debugfs_init(pdev); +} + 
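+/*
+ * monitor_peer_tx_capture_filter_check() below follows the same dispatch
+ * shape as every monitor_* wrapper in this header: resolve soc->monitor_soc,
+ * then mon_ops, and fall back to a benign default when either is missing.
+ * As a sketch (mon_foo is a hypothetical callback, shown only to make the
+ * pattern explicit):
+ *
+ *	static inline void monitor_foo(struct dp_pdev *pdev)
+ *	{
+ *		struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+ *
+ *		if (!mon_soc || !mon_soc->mon_ops ||
+ *		    !mon_soc->mon_ops->mon_foo)
+ *			return;
+ *
+ *		mon_soc->mon_ops->mon_foo(pdev);
+ *	}
+ */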
+static inline void monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, + struct dp_peer *peer) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_peer_tx_capture_filter_check) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_peer_tx_capture_filter_check(pdev, peer); +} + +static inline +QDF_STATUS monitor_tx_add_to_comp_queue(struct dp_soc *soc, + struct dp_tx_desc_s *desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return QDF_STATUS_SUCCESS; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_tx_add_to_comp_queue) { + qdf_err("callback not registered"); + return QDF_STATUS_E_FAILURE; + } + + return monitor_ops->mon_tx_add_to_comp_queue(soc, desc, ts, peer); +} + +#else +static inline void monitor_peer_tid_peer_id_update(struct dp_soc *soc, + struct dp_peer *peer, + uint16_t peer_id) +{ +} + +static inline void monitor_tx_ppdu_stats_attach(struct dp_pdev *pdev) +{ +} + +static inline void monitor_tx_ppdu_stats_detach(struct dp_pdev *pdev) +{ +} + +static inline QDF_STATUS monitor_tx_capture_debugfs_init(struct dp_pdev *pdev) +{ + return QDF_STATUS_E_FAILURE; +} + +static inline void monitor_peer_tx_capture_filter_check(struct dp_pdev *pdev, + struct dp_peer *peer) +{ +} + +static inline +QDF_STATUS monitor_tx_add_to_comp_queue(struct dp_soc *soc, + struct dp_tx_desc_s *desc, + struct hal_tx_completion_status *ts, + struct dp_peer *peer) +{ + return QDF_STATUS_E_FAILURE; +} + +#endif + +#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG) +static inline bool monitor_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = soc->dp_soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return true; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_ppdu_stats_ind_handler) { + qdf_err("callback not registered"); + return true; + } + + return monitor_ops->mon_ppdu_stats_ind_handler(soc, msg_word, + htt_t2h_msg); +} +#else +static inline bool monitor_ppdu_stats_ind_handler(struct htt_soc *soc, + uint32_t *msg_word, + qdf_nbuf_t htt_t2h_msg) +{ + return true; +} +#endif + +static inline QDF_STATUS monitor_htt_ppdu_stats_attach(struct dp_pdev *pdev) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return QDF_STATUS_SUCCESS; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_htt_ppdu_stats_attach) { + qdf_err("callback not registered"); + return QDF_STATUS_E_FAILURE; + } + + return monitor_ops->mon_htt_ppdu_stats_attach(pdev); +} + +static inline void monitor_htt_ppdu_stats_detach(struct dp_pdev *pdev) +{ + struct dp_mon_ops *monitor_ops; + struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc; + + if (!mon_soc) { + qdf_err("monitor soc is NULL"); + return; + } + + monitor_ops = mon_soc->mon_ops; + if (!monitor_ops || !monitor_ops->mon_htt_ppdu_stats_detach) { + qdf_err("callback not registered"); + return; + } + + return monitor_ops->mon_htt_ppdu_stats_detach(pdev); +} + +static inline void 
+static inline void monitor_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_print_pdev_rx_mon_stats) {
+		qdf_err("callback not registered");
+		return;
+	}
+
+	return monitor_ops->mon_print_pdev_rx_mon_stats(pdev);
+}
+
+#ifdef WLAN_TX_PKT_CAPTURE_ENH
+static inline void monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_print_pdev_tx_capture_stats) {
+		qdf_err("callback not registered");
+		return;
+	}
+
+	return monitor_ops->mon_print_pdev_tx_capture_stats(pdev);
+}
+
+static inline QDF_STATUS monitor_config_enh_tx_capture(struct dp_pdev *pdev,
+							uint32_t val)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_config_enh_tx_capture) {
+		qdf_err("callback not registered");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return monitor_ops->mon_config_enh_tx_capture(pdev, val);
+}
+#else
+static inline void monitor_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
+{
+}
+
+static inline QDF_STATUS monitor_config_enh_tx_capture(struct dp_pdev *pdev,
+							uint32_t val)
+{
+	return QDF_STATUS_E_INVAL;
+}
+#endif
+
+#ifdef WLAN_RX_PKT_CAPTURE_ENH
+static inline QDF_STATUS monitor_config_enh_rx_capture(struct dp_pdev *pdev,
+							uint32_t val)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_config_enh_rx_capture) {
+		qdf_err("callback not registered");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return monitor_ops->mon_config_enh_rx_capture(pdev, val);
+}
+#else
+static inline QDF_STATUS monitor_config_enh_rx_capture(struct dp_pdev *pdev,
+							uint32_t val)
+{
+	return QDF_STATUS_E_INVAL;
+}
+#endif
+
+static inline QDF_STATUS monitor_set_bpr_enable(struct dp_pdev *pdev,
+						uint32_t val)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_set_bpr_enable) {
+		qdf_err("callback not registered");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return monitor_ops->mon_set_bpr_enable(pdev, val);
+}
+
+#ifdef ATH_SUPPORT_NAC
+static inline int monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return 0;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_set_filter_neigh_peers) {
+		qdf_err("callback not registered");
+		return 0;
+	}
+
+	return monitor_ops->mon_set_filter_neigh_peers(pdev, val);
+}
+#else
+static inline int monitor_set_filter_neigh_peers(struct dp_pdev *pdev, bool val)
+{
+	return 0;
+}
+#endif
+
+#ifdef WLAN_ATF_ENABLE
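+/*
+ * monitor_set_atf_stats_enable() - forward the ATF stats enable/disable
+ * request to the monitor module via the registered mon_ops callback
+ * @pdev: Datapath pdev handle
+ * @value: enable or disable ATF stats
+ *
+ * Return: None
+ */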
+static inline
+void monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_set_atf_stats_enable) {
+		qdf_err("callback not registered");
+		return;
+	}
+
+	return monitor_ops->mon_set_atf_stats_enable(pdev, value);
+}
+#else
+static inline
+void monitor_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
+{
+}
+#endif
+
+static inline
+void monitor_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_set_bsscolor) {
+		qdf_err("callback not registered");
+		return;
+	}
+
+	return monitor_ops->mon_set_bsscolor(pdev, bsscolor);
+}
+
+static inline
+bool monitor_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return false;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_pdev_get_filter_mcast_data) {
+		qdf_err("callback not registered");
+		return false;
+	}
+
+	return monitor_ops->mon_pdev_get_filter_mcast_data(pdev_handle);
+}
+
+static inline
+bool monitor_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return false;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_pdev_get_filter_non_data) {
+		qdf_err("callback not registered");
+		return false;
+	}
+
+	return monitor_ops->mon_pdev_get_filter_non_data(pdev_handle);
+}
+
+static inline
+bool monitor_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return false;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_pdev_get_filter_ucast_data) {
+		qdf_err("callback not registered");
+		return false;
+	}
+
+	return monitor_ops->mon_pdev_get_filter_ucast_data(pdev_handle);
+}
+
+#ifdef WDI_EVENT_ENABLE
+static inline
+int monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event, bool enable)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return 0;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_set_pktlog_wifi3) {
+		qdf_err("callback not registered");
+		return 0;
+	}
+
+	return monitor_ops->mon_set_pktlog_wifi3(pdev, event, enable);
+}
+#else
+static inline int monitor_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
+					   bool enable)
+{
+	return 0;
+}
+#endif
+
+#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
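+/*
+ * monitor_pktlogmod_exit() - dispatch pktlog module exit handling to the
+ * monitor module via the registered mon_ops callback
+ * @pdev: Datapath pdev handle
+ *
+ * Return: None
+ */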
+static inline void monitor_pktlogmod_exit(struct dp_pdev *pdev)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_pktlogmod_exit) {
+		qdf_err("callback not registered");
+		return;
+	}
+
+	return monitor_ops->mon_pktlogmod_exit(pdev);
+}
+#else
+static inline void monitor_pktlogmod_exit(struct dp_pdev *pdev) {}
+#endif
+
+static inline
+void monitor_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_vdev_set_monitor_mode_buf_rings) {
+		qdf_err("callback not registered");
+		return;
+	}
+
+	return monitor_ops->mon_vdev_set_monitor_mode_buf_rings(pdev);
+}
+
+static inline
+void monitor_neighbour_peers_detach(struct dp_pdev *pdev)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_neighbour_peers_detach) {
+		qdf_err("callback not registered");
+		return;
+	}
+
+	return monitor_ops->mon_neighbour_peers_detach(pdev);
+}
+
+#ifdef FEATURE_NAC_RSSI
+static inline QDF_STATUS monitor_filter_neighbour_peer(struct dp_pdev *pdev,
+							uint8_t *rx_pkt_hdr)
+{
+	struct dp_mon_ops *monitor_ops;
+	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
+
+	if (!mon_soc) {
+		qdf_err("monitor soc is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	monitor_ops = mon_soc->mon_ops;
+	if (!monitor_ops || !monitor_ops->mon_filter_neighbour_peer) {
+		qdf_err("callback not registered");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return monitor_ops->mon_filter_neighbour_peer(pdev, rx_pkt_hdr);
+}
+#endif
diff --git a/dp/wifi3.0/monitor/dp_rx_mon_status.c b/dp/wifi3.0/monitor/dp_rx_mon_status.c
index 43a44749e6..c1756ddc82 100644
--- a/dp/wifi3.0/monitor/dp_rx_mon_status.c
+++ b/dp/wifi3.0/monitor/dp_rx_mon_status.c
@@ -27,6 +27,8 @@
 #include "dp_rx_mon.h"
 #include "dp_internal.h"
 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
+#include "dp_htt.h"
+#include "dp_mon.h"
 #include "htt.h"
diff --git a/wlan_cfg/wlan_cfg.c b/wlan_cfg/wlan_cfg.c
index c2c84c1123..170d6952a1 100644
--- a/wlan_cfg/wlan_cfg.c
+++ b/wlan_cfg/wlan_cfg.c
@@ -1682,22 +1682,30 @@ int wlan_cfg_get_dma_mon_buf_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg)
 	return cfg->dma_mon_buf_ring_size;
 }
 
+qdf_export_symbol(wlan_cfg_get_dma_mon_buf_ring_size);
+
 int wlan_cfg_get_dma_mon_dest_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg)
 {
 	return cfg->dma_mon_dest_ring_size;
 }
 
+qdf_export_symbol(wlan_cfg_get_dma_mon_dest_ring_size);
+
 int wlan_cfg_get_dma_mon_stat_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg)
 {
 	return cfg->dma_mon_status_ring_size;
 }
 
+qdf_export_symbol(wlan_cfg_get_dma_mon_stat_ring_size);
+
 int wlan_cfg_get_dma_mon_desc_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg)
 {
 	return cfg->rxdma_monitor_desc_ring;
 }
 
+qdf_export_symbol(wlan_cfg_get_dma_mon_desc_ring_size);
+
 int wlan_cfg_get_rx_dma_buf_ring_size(struct wlan_cfg_dp_pdev_ctxt *cfg)
 {
 	return cfg->rx_dma_buf_ring_size;