qcacmn: Initialization of stats in txrx_peer and monitor_peer

* Initialize stats in dp_txrx_peer and dp_monitor_peer as per
  the stats re-organization.
* Update peer calibrated stats on each expiry of the one-second
  cal-client timer.
* Define APIs to reset monitor_peer stats and to fetch rdkstats_ctx
  from monitor peer.
* Add a check to ensure that monitor peer related APIs are called
  only for legacy peers or MLO link peers (see the sketch below this
  list).
* Update the peer-based pktlog flag and the tx/rx capture feature
  flags in monitor_peer.
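
A minimal sketch of the monitor-peer guard called out above, taken
from the dp_peer_create_wifi3() hunk further down (monitor-peer APIs
run only when the peer is not an MLD peer):

    if (!IS_MLO_DP_MLD_PEER(peer)) {
            dp_monitor_peer_tx_capture_filter_check(pdev, peer);
            dp_monitor_peer_reset_stats(soc, peer);
    }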

Change-Id: Idadbe19e85d4a9fdf77fb6dabe0c8a4952b024b8
CRs-Fixed: 3092123
Author: Harsh Kumar Bijlani
Date: 2021-12-21 01:38:57 +05:30
Committed by: Madan Koyyalamudi
Parent: 148f220408
Commit: eee813ad02
10 changed files with 393 additions and 103 deletions


@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -177,3 +178,50 @@ void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats)
qdf_export_symbol(dp_cal_client_update_peer_stats);
void dp_cal_client_update_peer_stats_wifi3(struct cdp_calibr_stats_intf *peer_stats_intf,
struct cdp_calibr_stats *peer_calibr_stats)
{
uint32_t temp_rx_bytes = peer_stats_intf->to_stack.bytes;
uint32_t temp_rx_data = peer_stats_intf->to_stack.num;
uint32_t temp_tx_bytes = peer_stats_intf->tx_success.bytes;
uint32_t temp_tx_data = peer_stats_intf->tx_success.num;
uint32_t temp_tx_ucast_pkts = peer_stats_intf->tx_ucast.num;
peer_calibr_stats->rx.rx_byte_rate = temp_rx_bytes -
peer_calibr_stats->rx.rx_bytes_success_last;
peer_calibr_stats->rx.rx_data_rate = temp_rx_data -
peer_calibr_stats->rx.rx_data_success_last;
peer_calibr_stats->tx.tx_byte_rate = temp_tx_bytes -
peer_calibr_stats->tx.tx_bytes_success_last;
peer_calibr_stats->tx.tx_data_rate = temp_tx_data -
peer_calibr_stats->tx.tx_data_success_last;
peer_calibr_stats->tx.tx_data_ucast_rate = temp_tx_ucast_pkts -
peer_calibr_stats->tx.tx_data_ucast_last;
/* Check tx and rx packets in last one second, and increment
* inactive time for peer
*/
if (peer_calibr_stats->tx.tx_data_rate || peer_calibr_stats->rx.rx_data_rate)
peer_calibr_stats->tx.inactive_time = 0;
else
peer_calibr_stats->tx.inactive_time++;
peer_calibr_stats->rx.rx_bytes_success_last = temp_rx_bytes;
peer_calibr_stats->rx.rx_data_success_last = temp_rx_data;
peer_calibr_stats->tx.tx_bytes_success_last = temp_tx_bytes;
peer_calibr_stats->tx.tx_data_success_last = temp_tx_data;
peer_calibr_stats->tx.tx_data_ucast_last = temp_tx_ucast_pkts;
if (peer_calibr_stats->tx.tx_data_ucast_rate) {
if (peer_calibr_stats->tx.tx_data_ucast_rate >
peer_calibr_stats->tx.tx_data_rate)
peer_calibr_stats->tx.last_per =
((peer_calibr_stats->tx.tx_data_ucast_rate -
peer_calibr_stats->tx.tx_data_rate) * 100) /
peer_calibr_stats->tx.tx_data_ucast_rate;
else
peer_calibr_stats->tx.last_per = 0;
}
}
qdf_export_symbol(dp_cal_client_update_peer_stats_wifi3);
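
As a worked check of the PER computation above (illustrative numbers):
if 100 unicast data packets were sent in the last second
(tx_data_ucast_rate = 100) and 90 of them completed as data frames
(tx_data_rate = 90), then last_per = ((100 - 90) * 100) / 100 = 10,
i.e. a 10% packet error rate over that one-second interval.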


@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -45,6 +46,8 @@ void dp_cal_client_timer_start(void *ctx);
void dp_cal_client_timer_stop(void *ctx);
void dp_cal_client_stats_timer_fn(void *pdev_hdl);
void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats);
void dp_cal_client_update_peer_stats_wifi3(struct cdp_calibr_stats_intf *stats_intf,
struct cdp_calibr_stats *calibr_stats);
#ifndef ATH_SUPPORT_EXT_STAT
void dp_cal_client_attach(struct cdp_cal_client **cal_client_ctx,
@@ -72,6 +75,11 @@ void dp_cal_client_stats_timer_fn(void *pdev_hdl)
void dp_cal_client_update_peer_stats(struct cdp_peer_stats *peer_stats)
{
}
void dp_cal_client_update_peer_stats_wifi3(struct cdp_calibr_stats_intf *stats_intf,
struct cdp_calibr_stats *calibr_stats)
{
}
#endif
#endif /*_DP_CAL_CLIENT_H_*/


@@ -230,6 +230,17 @@ static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc,
return QDF_STATUS_E_FAILURE;
}
static inline struct cdp_peer_rate_stats_ctx*
dp_monitor_peer_get_rdkstats_ctx(struct dp_soc *soc, struct dp_peer *peer)
{
return NULL;
}
static inline
void dp_monitor_peer_reset_stats(struct dp_soc *soc, struct dp_peer *peer)
{
}
static inline QDF_STATUS dp_monitor_pdev_init(struct dp_pdev *pdev)
{
return QDF_STATUS_SUCCESS;


@@ -5369,9 +5369,6 @@ static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
qdf_spinlock_destroy(&pdev->tx_mutex);
qdf_spinlock_destroy(&pdev->vdev_list_lock);
if (pdev->invalid_peer)
qdf_mem_free(pdev->invalid_peer);
dp_monitor_pdev_deinit(pdev);
dp_pdev_srng_deinit(pdev);
@@ -6848,6 +6845,8 @@ static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
/* initialize the peer_id */
txrx_peer->vdev = peer->vdev;
DP_STATS_INIT(txrx_peer);
dp_wds_ext_peer_init(txrx_peer);
dp_peer_rx_bufq_resources_init(txrx_peer);
dp_peer_hw_txrx_stats_init(soc, txrx_peer);
@@ -6868,6 +6867,21 @@ static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
return QDF_STATUS_SUCCESS;
}
static inline
void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
{
if (!txrx_peer)
return;
txrx_peer->tx_failed = 0;
txrx_peer->comp_pkt.num = 0;
txrx_peer->comp_pkt.bytes = 0;
txrx_peer->to_stack.num = 0;
txrx_peer->to_stack.bytes = 0;
DP_STATS_CLR(txrx_peer);
}
/*
* dp_peer_create_wifi3() - attach txrx peer
* @soc_hdl: Datapath soc handle
@@ -6885,7 +6899,6 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
int i;
struct dp_soc *soc = (struct dp_soc *)soc_hdl;
struct dp_pdev *pdev;
struct cdp_peer_cookie peer_cookie;
enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
struct dp_vdev *vdev = NULL;
@@ -6934,17 +6947,20 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_spinlock_create(&peer->peer_info_lock);
DP_STATS_INIT(peer);
DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
/*
* In tx_monitor mode, a filter may be set for an unassociated peer;
* when the unassociated peer becomes associated, the tx_cap_enabled
* flag needs to be updated to support peer filtering.
*/
if (!IS_MLO_DP_MLD_PEER(peer)) {
dp_monitor_peer_tx_capture_filter_check(pdev, peer);
dp_monitor_peer_reset_stats(soc, peer);
}
if (peer->txrx_peer) {
dp_peer_rx_bufq_resources_init(peer->txrx_peer);
dp_txrx_peer_stats_clr(peer->txrx_peer);
dp_set_peer_isolation(peer->txrx_peer, false);
dp_wds_ext_peer_init(peer->txrx_peer);
dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
@@ -7062,27 +7078,6 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
peer->valid = 1;
dp_local_peer_id_alloc(pdev, peer);
DP_STATS_INIT(peer);
DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
QDF_MAC_ADDR_SIZE);
peer_cookie.ctx = NULL;
peer_cookie.pdev_id = pdev->pdev_id;
peer_cookie.cookie = pdev->next_peer_cookie++;
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
(void *)&peer_cookie,
peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
#endif
if (soc->rdkstats_enabled) {
if (!peer_cookie.ctx) {
pdev->next_peer_cookie--;
qdf_err("Failed to initialize peer rate stats");
} else {
peer->rdkstats_ctx = (struct cdp_peer_rate_stats_ctx *)
peer_cookie.ctx;
}
}
dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
@@ -7408,6 +7403,7 @@ dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
* which is REO2TCL ring. for this reason we should
* not setup reo_queues and default route for bss_peer.
*/
if (!IS_MLO_DP_MLD_PEER(peer))
dp_monitor_peer_tx_init(pdev, peer);
if (!setup_info)
@@ -7458,6 +7454,7 @@ dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
}
}
if (!IS_MLO_DP_MLD_PEER(peer))
dp_peer_ppdu_delayed_ba_init(peer);
fail:
@@ -7998,7 +7995,6 @@ void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
struct dp_pdev *pdev = vdev->pdev;
struct dp_soc *soc = pdev->soc;
uint16_t peer_id;
struct cdp_peer_cookie peer_cookie;
struct dp_peer *tmp_peer;
bool found = false;
@@ -8027,21 +8023,6 @@ void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
QDF_MAC_ADDR_REF(peer->mac_addr.raw));
/* send peer destroy event to upper layer */
qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
QDF_MAC_ADDR_SIZE);
peer_cookie.ctx = NULL;
peer_cookie.ctx = (struct cdp_stats_cookie *)
peer->rdkstats_ctx;
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
soc,
(void *)&peer_cookie,
peer->peer_id,
WDI_NO_VAL,
pdev->pdev_id);
#endif
peer->rdkstats_ctx = NULL;
wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
WLAN_MD_DP_PEER, "dp_peer");
@@ -8063,6 +8044,8 @@ void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
/* cleanup the peer data */
dp_peer_cleanup(vdev, peer);
if (!IS_MLO_DP_MLD_PEER(peer))
dp_monitor_peer_detach(soc, peer);
qdf_spinlock_destroy(&peer->peer_state_lock);
@@ -9492,6 +9475,11 @@ static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
if (qdf_unlikely(!peer))
return;
if (qdf_unlikely(!peer->txrx_peer)) {
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return;
}
if (qdf_likely(peer->txrx_peer->delay_stats)) {
delay_stats = peer->txrx_peer->delay_stats;
ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
@@ -11499,7 +11487,7 @@ dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
dp_wdi_event_handler(
WDI_EVENT_FLUSH_RATE_STATS_REQ,
soc, peer->rdkstats_ctx,
soc, dp_monitor_peer_get_rdkstats_ctx(soc, peer),
peer->peer_id,
WDI_NO_VAL, peer->vdev->pdev->pdev_id);
}
@@ -11550,7 +11538,9 @@ static void *dp_peer_get_rdkstats_ctx(struct cdp_soc_t *soc_hdl,
if (!peer)
return NULL;
rdkstats_ctx = peer->rdkstats_ctx;
if (!IS_MLO_DP_MLD_PEER(peer))
rdkstats_ctx = dp_monitor_peer_get_rdkstats_ctx(soc,
peer);
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
@@ -14624,16 +14614,6 @@ static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
soc->tcl_cmd_credit_ring.hal_srng);
dp_tx_pdev_init(pdev);
/*
* Variable to prevent double pdev deinitialization during
* radio detach execution, i.e. in the absence of any vdev.
*/
pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
if (!pdev->invalid_peer) {
dp_init_err("%pK: Invalid peer memory allocation failed", soc);
goto fail2;
}
/*
* set nss pdev config based on soc config
@@ -14682,7 +14662,7 @@ static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
if (!pdev->sojourn_buf) {
dp_init_err("%pK: Failed to allocate sojourn buf", soc);
goto fail3;
goto fail2;
}
sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
@@ -14693,18 +14673,18 @@ static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
if (dp_rxdma_ring_setup(soc, pdev)) {
dp_init_err("%pK: RXDMA ring config failed", soc);
goto fail4;
goto fail3;
}
if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
goto fail4;
goto fail3;
if (dp_ipa_ring_resource_setup(soc, pdev))
goto fail5;
goto fail4;
if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
goto fail5;
goto fail4;
}
ret = dp_rx_fst_attach(soc, pdev);
@@ -14712,18 +14692,18 @@ static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
(ret != QDF_STATUS_E_NOSUPPORT)) {
dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
soc, pdev_id, ret);
goto fail6;
goto fail5;
}
if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("dp_pdev_bkp_stats_attach failed"));
goto fail7;
goto fail6;
}
if (dp_monitor_pdev_init(pdev)) {
dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
goto fail8;
goto fail7;
}
/* initialize sw rx descriptors */
@@ -14739,22 +14719,20 @@ static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
qdf_skb_total_mem_stats_read());
return QDF_STATUS_SUCCESS;
fail8:
dp_pdev_bkp_stats_detach(pdev);
fail7:
dp_rx_fst_detach(soc, pdev);
dp_pdev_bkp_stats_detach(pdev);
fail6:
dp_ipa_uc_detach(soc, pdev);
dp_rx_fst_detach(soc, pdev);
fail5:
dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
dp_ipa_uc_detach(soc, pdev);
fail4:
dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
dp_rxdma_ring_cleanup(soc, pdev);
qdf_nbuf_free(pdev->sojourn_buf);
fail3:
fail2:
qdf_spinlock_destroy(&pdev->tx_mutex);
qdf_spinlock_destroy(&pdev->vdev_list_lock);
qdf_mem_free(pdev->invalid_peer);
fail2:
dp_pdev_srng_deinit(pdev);
fail1:
dp_wdi_event_detach(pdev);


@@ -2541,6 +2541,7 @@ static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
}
if (peer->peer_id == HTT_INVALID_PEER) {
if (!IS_MLO_DP_MLD_PEER(peer))
dp_monitor_peer_tid_peer_id_update(soc, peer,
peer_id);
} else {
@@ -3881,6 +3882,7 @@ void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
/* save vdev related member in case vdev freed */
vdev_opmode = vdev->opmode;
if (!IS_MLO_DP_MLD_PEER(peer))
dp_monitor_peer_tx_cleanup(vdev, peer);
if (vdev_opmode != wlan_op_mode_monitor)


@@ -730,27 +730,33 @@ void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota)
#endif
/*
* dp_peer_tx_init() - Initialize receive TID state
* dp_mon_peer_tx_init() - Initialize receive TID state in monitor peer
* @pdev: Datapath pdev
* @peer: Datapath peer
*
*/
static void
dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
dp_mon_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
if (!peer->monitor_peer)
return;
dp_peer_tid_queue_init(peer);
dp_peer_update_80211_hdr(peer->vdev, peer);
}
/*
* dp_peer_tx_cleanup() - Deinitialize receive TID state
* dp_mon_peer_tx_cleanup() - Deinitialize receive TID state in monitor peer
* @vdev: Datapath vdev
* @peer: Datapath peer
*
*/
static void
dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
dp_mon_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
if (!peer->monitor_peer)
return;
dp_peer_tid_queue_cleanup(peer);
}
@@ -866,8 +872,8 @@ dp_mon_register_feature_ops_1_0(struct dp_soc *soc)
}
mon_ops->mon_config_debug_sniffer = dp_config_debug_sniffer;
mon_ops->mon_peer_tx_init = dp_peer_tx_init;
mon_ops->mon_peer_tx_cleanup = dp_peer_tx_cleanup;
mon_ops->mon_peer_tx_init = dp_mon_peer_tx_init;
mon_ops->mon_peer_tx_cleanup = dp_mon_peer_tx_cleanup;
mon_ops->mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach;
mon_ops->mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach;
mon_ops->mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats;
@@ -989,6 +995,8 @@ struct dp_mon_ops monitor_ops_1_0 = {
.mon_vdev_detach = dp_mon_vdev_detach,
.mon_peer_attach = dp_mon_peer_attach,
.mon_peer_detach = dp_mon_peer_detach,
.mon_peer_get_rdkstats_ctx = dp_mon_peer_get_rdkstats_ctx,
.mon_peer_reset_stats = dp_mon_peer_reset_stats,
.mon_flush_rings = dp_flush_monitor_rings,
#if !defined(DISABLE_MON_CONFIG)
.mon_pdev_htt_srng_setup = dp_mon_htt_srng_setup_1_0,


@@ -1137,6 +1137,8 @@ struct dp_mon_ops monitor_ops_2_0 = {
.mon_vdev_detach = dp_mon_vdev_detach,
.mon_peer_attach = dp_mon_peer_attach,
.mon_peer_detach = dp_mon_peer_detach,
.mon_peer_get_rdkstats_ctx = dp_mon_peer_get_rdkstats_ctx,
.mon_peer_reset_stats = dp_mon_peer_reset_stats,
.mon_flush_rings = NULL,
#if !defined(DISABLE_MON_CONFIG)
.mon_pdev_htt_srng_setup = dp_mon_pdev_htt_srng_setup_2_0,


@@ -1796,6 +1796,7 @@ dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
uint8_t *mac_addr, uint8_t enb_dsb)
{
struct dp_peer *peer;
QDF_STATUS status = QDF_STATUS_E_FAILURE;
struct dp_pdev *pdev =
dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
pdev_id);
@@ -1814,12 +1815,15 @@ dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
return QDF_STATUS_E_FAILURE;
}
if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) {
peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
mon_pdev->dp_peer_based_pktlog = enb_dsb;
status = QDF_STATUS_SUCCESS;
}
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return QDF_STATUS_SUCCESS;
return status;
}
/**
@@ -1842,7 +1846,7 @@ dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
uint8_t *peer_mac)
{
struct dp_peer *peer;
QDF_STATUS status;
QDF_STATUS status = QDF_STATUS_E_FAILURE;
struct dp_pdev *pdev =
dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
pdev_id);
@@ -1856,12 +1860,16 @@ dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
return QDF_STATUS_E_FAILURE;
/* we need to set tx pkt capture for non associated peer */
status = dp_monitor_tx_peer_filter(pdev, peer, is_tx_pkt_cap_enable,
if (!IS_MLO_DP_MLD_PEER(peer)) {
status = dp_monitor_tx_peer_filter(pdev, peer,
is_tx_pkt_cap_enable,
peer_mac);
status = dp_peer_set_rx_capture_enabled(pdev, peer,
is_rx_pkt_cap_enable,
peer_mac);
}
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return status;
@@ -1991,7 +1999,26 @@ dp_peer_cal_clients_stats_update(struct dp_soc *soc,
struct dp_peer *peer,
void *arg)
{
dp_cal_client_update_peer_stats(&peer->stats);
struct cdp_calibr_stats_intf peer_stats_intf = {0};
struct dp_peer *tgt_peer = NULL;
struct dp_txrx_peer *txrx_peer = NULL;
if (!dp_peer_is_primary_link_peer(peer))
return;
tgt_peer = dp_get_tgt_peer_from_peer(peer);
if (!tgt_peer || !(tgt_peer->txrx_peer))
return;
txrx_peer = tgt_peer->txrx_peer;
peer_stats_intf.to_stack = txrx_peer->to_stack;
peer_stats_intf.tx_success =
txrx_peer->stats.per_pkt_stats.tx.tx_success;
peer_stats_intf.tx_ucast =
txrx_peer->stats.per_pkt_stats.tx.ucast;
dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
&tgt_peer->stats);
}
/* dp_iterate_update_peer_list - update peer stats on cal client timer
@@ -4701,24 +4728,31 @@ QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
soc = pdev->soc;
mon_pdev = pdev->monitor_pdev;
mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer));
if (!mon_pdev->invalid_mon_peer) {
dp_mon_err("%pK: Memory allocation failed for invalid "
"monitor peer", pdev);
return QDF_STATUS_E_NOMEM;
}
mon_ops = dp_mon_ops_get(pdev->soc);
if (!mon_ops) {
dp_mon_err("Monitor ops is NULL");
return QDF_STATUS_E_FAILURE;
goto fail0;
}
mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
if (!mon_pdev->filter) {
dp_mon_err("%pK: Memory allocation failed for monitor filter",
pdev);
return QDF_STATUS_E_NOMEM;
goto fail0;
}
if (mon_ops->tx_mon_filter_alloc) {
if (mon_ops->tx_mon_filter_alloc(pdev)) {
dp_mon_err("%pK: Memory allocation failed for tx monitor filter",
pdev);
return QDF_STATUS_E_NOMEM;
dp_mon_err("%pK: Memory allocation failed for tx monitor "
"filter", pdev);
goto fail1;
}
}
@@ -4753,12 +4787,12 @@ QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
pdev->soc->osdev,
&dp_iterate_update_peer_list);
if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
goto fail0;
goto fail2;
if (mon_ops->mon_rings_init) {
if (mon_ops->mon_rings_init(pdev)) {
dp_mon_err("%pK: MONITOR rings setup failed", pdev);
goto fail1;
goto fail3;
}
}
@@ -4775,14 +4809,17 @@ QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
mon_pdev->is_dp_mon_pdev_initialized = true;
return QDF_STATUS_SUCCESS;
fail1:
fail3:
dp_htt_ppdu_stats_detach(pdev);
fail0:
fail2:
qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
if (mon_ops->tx_mon_filter_dealloc)
mon_ops->tx_mon_filter_dealloc(pdev);
fail1:
dp_mon_filter_dealloc(mon_pdev);
fail0:
qdf_mem_free(mon_pdev->invalid_mon_peer);
return QDF_STATUS_E_FAILURE;
}
@@ -4821,6 +4858,8 @@ QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
dp_mon_filter_dealloc(mon_pdev);
if (mon_ops->mon_rings_deinit)
mon_ops->mon_rings_deinit(pdev);
if (mon_pdev->invalid_mon_peer)
qdf_mem_free(mon_pdev->invalid_mon_peer);
mon_pdev->is_dp_mon_pdev_initialized = false;
return QDF_STATUS_SUCCESS;
@@ -4866,6 +4905,92 @@ QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
return QDF_STATUS_SUCCESS;
}
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
* dp_mon_peer_attach_notify() - Raise WDI event for peer create
* @peer: DP Peer handle
*
* Return: none
*/
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
struct dp_mon_peer *mon_peer = peer->monitor_peer;
struct dp_pdev *pdev;
struct dp_soc *soc;
struct cdp_peer_cookie peer_cookie;
pdev = peer->vdev->pdev;
soc = pdev->soc;
qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
QDF_MAC_ADDR_SIZE);
peer_cookie.ctx = NULL;
peer_cookie.pdev_id = pdev->pdev_id;
peer_cookie.cookie = pdev->next_peer_cookie++;
dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc,
(void *)&peer_cookie,
peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
if (soc->rdkstats_enabled) {
if (!peer_cookie.ctx) {
pdev->next_peer_cookie--;
qdf_err("Failed to initialize peer rate stats");
mon_peer->rdkstats_ctx = NULL;
} else {
mon_peer->rdkstats_ctx = (struct cdp_peer_rate_stats_ctx *)
peer_cookie.ctx;
}
}
}
/**
* dp_mon_peer_detach_notify() - Raise WDI event for peer destroy
* @peer: DP Peer handle
*
* Return: none
*/
static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
struct dp_mon_peer *mon_peer = peer->monitor_peer;
struct dp_pdev *pdev;
struct dp_soc *soc;
struct cdp_peer_cookie peer_cookie;
pdev = peer->vdev->pdev;
soc = pdev->soc;
/* send peer destroy event to upper layer */
qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
QDF_MAC_ADDR_SIZE);
peer_cookie.ctx = NULL;
peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->rdkstats_ctx;
dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
soc,
(void *)&peer_cookie,
peer->peer_id,
WDI_NO_VAL,
pdev->pdev_id);
mon_peer->rdkstats_ctx = NULL;
}
#else
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
peer->monitor_peer->rdkstats_ctx = NULL;
}
static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
peer->monitor_peer->rdkstats_ctx = NULL;
}
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
{
@@ -4887,6 +5012,11 @@ QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
*/
dp_monitor_peer_tx_capture_filter_check(pdev, peer);
DP_STATS_INIT(mon_peer);
DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
dp_mon_peer_attach_notify(peer);
return QDF_STATUS_SUCCESS;
}
#endif
@@ -4895,6 +5025,11 @@ QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
{
struct dp_mon_peer *mon_peer = peer->monitor_peer;
if (!mon_peer)
return QDF_STATUS_SUCCESS;
dp_mon_peer_detach_notify(peer);
qdf_mem_free(mon_peer);
peer->monitor_peer = NULL;
@@ -4916,6 +5051,30 @@ void dp_mon_register_intr_ops(struct dp_soc *soc)
}
#endif
struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_rdkstats_ctx(struct dp_peer *peer)
{
struct dp_mon_peer *mon_peer = peer->monitor_peer;
if (mon_peer)
return mon_peer->rdkstats_ctx;
else
return NULL;
}
#ifdef QCA_ENHANCED_STATS_SUPPORT
void dp_mon_peer_reset_stats(struct dp_peer *peer)
{
struct dp_mon_peer *mon_peer = NULL;
mon_peer = peer->monitor_peer;
if (!mon_peer)
return;
DP_STATS_CLR(mon_peer);
DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
}
#endif
void dp_mon_ops_register(struct dp_soc *soc)
{
struct dp_mon_soc *mon_soc = soc->monitor_soc;


@@ -186,6 +186,28 @@ QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
*/
QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer);
/*
* dp_mon_peer_get_rdkstats_ctx() - Get rdk stats context from monitor peer
* @peer: Datapath peer handle
*
* Return: rdkstats_ctx
*/
struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_rdkstats_ctx(struct dp_peer *peer);
#ifdef QCA_ENHANCED_STATS_SUPPORT
/*
* dp_mon_peer_reset_stats() - Reset monitor peer stats
* @peer: Datapath peer handle
*
* Return: none
*/
void dp_mon_peer_reset_stats(struct dp_peer *peer);
#else
static inline void dp_mon_peer_reset_stats(struct dp_peer *peer)
{
}
#endif
/*
* dp_mon_cdp_ops_register() - Register monitor cdp ops
* @soc: Datapath soc handle
@@ -435,6 +457,8 @@ struct dp_mon_ops {
QDF_STATUS (*mon_vdev_detach)(struct dp_vdev *vdev);
QDF_STATUS (*mon_peer_attach)(struct dp_peer *peer);
QDF_STATUS (*mon_peer_detach)(struct dp_peer *peer);
struct cdp_peer_rate_stats_ctx *(*mon_peer_get_rdkstats_ctx)(struct dp_peer *peer);
void (*mon_peer_reset_stats)(struct dp_peer *peer);
QDF_STATUS (*mon_config_debug_sniffer)(struct dp_pdev *pdev, int val);
void (*mon_flush_rings)(struct dp_soc *soc);
#if !defined(DISABLE_MON_CONFIG)
@@ -1616,6 +1640,56 @@ static inline QDF_STATUS dp_monitor_peer_detach(struct dp_soc *soc,
return monitor_ops->mon_peer_detach(peer);
}
/*
* dp_monitor_peer_get_rdkstats_ctx() - Get RDK stats context from monitor peer
* @soc: Datapath soc handle
* @peer: Datapath peer handle
*
* Return: RDK stats context
*/
static inline struct cdp_peer_rate_stats_ctx*
dp_monitor_peer_get_rdkstats_ctx(struct dp_soc *soc, struct dp_peer *peer)
{
struct dp_mon_ops *monitor_ops;
struct dp_mon_soc *mon_soc = soc->monitor_soc;
if (!mon_soc)
return NULL;
monitor_ops = mon_soc->mon_ops;
if (!monitor_ops || !monitor_ops->mon_peer_get_rdkstats_ctx) {
dp_mon_debug("callback not registered");
return NULL;
}
return monitor_ops->mon_peer_get_rdkstats_ctx(peer);
}
/*
* dp_monitor_peer_reset_stats() - Reset monitor peer stats
* @soc: Datapath soc handle
* @peer: Datapath peer handle
*
* Return: none
*/
static inline void dp_monitor_peer_reset_stats(struct dp_soc *soc,
struct dp_peer *peer)
{
struct dp_mon_ops *monitor_ops;
struct dp_mon_soc *mon_soc = soc->monitor_soc;
if (!mon_soc)
return;
monitor_ops = mon_soc->mon_ops;
if (!monitor_ops || !monitor_ops->mon_peer_reset_stats) {
dp_mon_debug("callback not registered");
return;
}
monitor_ops->mon_peer_reset_stats(peer);
}
/*
* dp_monitor_pdev_init() - Monitor pdev init
* @pdev: point to pdev


@@ -1658,7 +1658,7 @@ dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
if (!peer)
return;
if ((peer->peer_id != HTT_INVALID_PEER) &&
if ((peer->peer_id != HTT_INVALID_PEER) && (peer->monitor_peer) &&
(peer->monitor_peer->peer_based_pktlog_filter)) {
dp_wdi_event_handler(
WDI_EVENT_RX_DESC, soc,