qcacmn: Add support to aggregate Link level stats

Add support to aggregate and ship MLO link stats
for ML peers.

Change-Id: Icb6c7b66d6f5c27e8a8f99655cf468e90c655ad6
CRs-Fixed: 3397721
This commit is contained in:
Amrit Sahai
2023-01-19 11:26:30 +05:30
committed by Madan Koyyalamudi
parent 24dc3d385c
commit ee22464bee
3 changed files with 160 additions and 39 deletions

View File

@@ -1951,12 +1951,8 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
_tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \ _tgtobj->rx.multicast.bytes += _srcobj->rx.multicast.bytes; \
_tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \ _tgtobj->rx.bcast.num += _srcobj->rx.bcast.num; \
_tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \ _tgtobj->rx.bcast.bytes += _srcobj->rx.bcast.bytes; \
if (_tgtobj->rx.to_stack.num >= _tgtobj->rx.multicast.num) \ _tgtobj->rx.unicast.num += _srcobj->rx.unicast.num; \
_tgtobj->rx.unicast.num = \ _tgtobj->rx.unicast.bytes += _srcobj->rx.unicast.bytes; \
_tgtobj->rx.to_stack.num - _tgtobj->rx.multicast.num; \
if (_tgtobj->rx.to_stack.bytes >= _tgtobj->rx.multicast.bytes) \
_tgtobj->rx.unicast.bytes = \
_tgtobj->rx.to_stack.bytes - _tgtobj->rx.multicast.bytes; \
_tgtobj->rx.raw.num += _srcobj->rx.raw.num; \ _tgtobj->rx.raw.num += _srcobj->rx.raw.num; \
_tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \ _tgtobj->rx.raw.bytes += _srcobj->rx.raw.bytes; \
_tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \ _tgtobj->rx.nawds_mcast_drop += _srcobj->rx.nawds_mcast_drop; \
@@ -5230,4 +5226,20 @@ void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc);
void dp_get_peer_stats(struct dp_peer *peer, void dp_get_peer_stats(struct dp_peer *peer,
struct cdp_peer_stats *peer_stats); struct cdp_peer_stats *peer_stats);
/**
 * dp_get_peer_hw_link_id() - get peer hardware link id
 * @soc: soc handle
 * @pdev: data path pdev
 *
 * Return: hardware link id of @pdev when per-link peer stats are
 *	   enabled in the soc cfg context, 0 otherwise
 */
static inline int
dp_get_peer_hw_link_id(struct dp_soc *soc,
		       struct dp_pdev *pdev)
{
	/* Guard first: link-level stats disabled means all traffic
	 * is accounted against index 0.
	 */
	if (!wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx))
		return 0;

	return soc->arch_ops.get_hw_link_id(pdev);
}
#endif /* #ifndef _DP_INTERNAL_H_ */ #endif /* #ifndef _DP_INTERNAL_H_ */

View File

@@ -10025,6 +10025,7 @@ void dp_get_peer_basic_stats(struct dp_peer *peer,
peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes; peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
} }
#ifdef QCA_ENHANCED_STATS_SUPPORT
/** /**
* dp_get_peer_per_pkt_stats()- Get peer per pkt stats * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
* @peer: Datapath peer * @peer: Datapath peer
@@ -10038,15 +10039,33 @@ void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
{ {
struct dp_txrx_peer *txrx_peer; struct dp_txrx_peer *txrx_peer;
struct dp_peer_per_pkt_stats *per_pkt_stats; struct dp_peer_per_pkt_stats *per_pkt_stats;
uint8_t inx = 0, link_id = 0;
struct dp_pdev *pdev;
struct dp_soc *soc;
uint8_t stats_arr_size;
txrx_peer = dp_get_txrx_peer(peer); txrx_peer = dp_get_txrx_peer(peer);
pdev = peer->vdev->pdev;
if (!txrx_peer) if (!txrx_peer)
return; return;
per_pkt_stats = &txrx_peer->stats.per_pkt_stats; if (!IS_MLO_DP_LINK_PEER(peer)) {
DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats); stats_arr_size = txrx_peer->stats_arr_size;
for (inx = 0; inx < stats_arr_size; inx++) {
per_pkt_stats = &txrx_peer->stats[inx].per_pkt_stats;
DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
}
} else {
soc = pdev->soc;
link_id = dp_get_peer_hw_link_id(soc, pdev);
per_pkt_stats =
&txrx_peer->stats[link_id].per_pkt_stats;
DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
}
} }
#ifdef WLAN_FEATURE_11BE_MLO
/** /**
* dp_get_peer_extd_stats()- Get peer extd stats * dp_get_peer_extd_stats()- Get peer extd stats
* @peer: Datapath peer * @peer: Datapath peer
@@ -10054,8 +10073,6 @@ void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
* *
* Return: none * Return: none
*/ */
#ifdef QCA_ENHANCED_STATS_SUPPORT
#ifdef WLAN_FEATURE_11BE_MLO
static inline static inline
void dp_get_peer_extd_stats(struct dp_peer *peer, void dp_get_peer_extd_stats(struct dp_peer *peer,
struct cdp_peer_stats *peer_stats) struct cdp_peer_stats *peer_stats)
@@ -10095,6 +10112,21 @@ void dp_get_peer_extd_stats(struct dp_peer *peer,
} }
#endif #endif
#else #else
/* Non-MLO fallback: accumulate per-packet stats from the single
 * (index 0) stats array entry of the peer into @peer_stats.
 */
static inline
void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
			       struct cdp_peer_stats *peer_stats)
{
	struct dp_peer_per_pkt_stats *src_stats;
	struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);

	if (!txrx_peer)
		return;

	src_stats = &txrx_peer->stats[0].per_pkt_stats;
	DP_UPDATE_PER_PKT_STATS(peer_stats, src_stats);
}
static inline static inline
void dp_get_peer_extd_stats(struct dp_peer *peer, void dp_get_peer_extd_stats(struct dp_peer *peer,
struct cdp_peer_stats *peer_stats) struct cdp_peer_stats *peer_stats)
@@ -10108,7 +10140,7 @@ void dp_get_peer_extd_stats(struct dp_peer *peer,
return; return;
} }
extd_stats = &txrx_peer->stats.extd_stats; extd_stats = &txrx_peer->stats[0].extd_stats;
DP_UPDATE_EXTD_STATS(peer_stats, extd_stats); DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
} }
#endif #endif

View File

@@ -4901,7 +4901,7 @@ void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
if (!txrx_peer) if (!txrx_peer)
goto dp_vdev_peer_stats_update_protocol_cnt_free_peer; goto dp_vdev_peer_stats_update_protocol_cnt_free_peer;
} }
per_pkt_stats = &txrx_peer->stats.per_pkt_stats; per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
if (qdf_nbuf_is_icmp_pkt(nbuf) == true) if (qdf_nbuf_is_icmp_pkt(nbuf) == true)
prot = CDP_TRACE_ICMP; prot = CDP_TRACE_ICMP;
@@ -8254,18 +8254,54 @@ QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer,
struct dp_peer *tgt_peer; struct dp_peer *tgt_peer;
struct dp_txrx_peer *txrx_peer; struct dp_txrx_peer *txrx_peer;
struct dp_peer_per_pkt_stats *peer_stats; struct dp_peer_per_pkt_stats *peer_stats;
uint8_t link_id = 0;
uint8_t idx = 0;
uint8_t stats_arr_size;
struct cdp_pkt_info pkt_info = {0};
struct dp_soc *soc = peer->vdev->pdev->soc;
struct dp_pdev *pdev = peer->vdev->pdev;
txrx_peer = dp_get_txrx_peer(peer); txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer) if (!txrx_peer)
return QDF_STATUS_E_FAILURE; return QDF_STATUS_E_FAILURE;
peer_stats = &txrx_peer->stats.per_pkt_stats; stats_arr_size = txrx_peer->stats_arr_size;
if (IS_MLO_DP_LINK_PEER(peer))
link_id = dp_get_peer_hw_link_id(soc, pdev);
switch (type) { switch (type) {
case cdp_peer_tx_ucast: case cdp_peer_tx_ucast:
buf->tx_ucast = peer_stats->tx.ucast; if (link_id > 0) {
peer_stats = &txrx_peer->stats[link_id].per_pkt_stats;
buf->tx_ucast = peer_stats->tx.ucast;
} else {
for (idx = 0; idx < stats_arr_size; idx++) {
peer_stats =
&txrx_peer->stats[idx].per_pkt_stats;
pkt_info.num += peer_stats->tx.ucast.num;
pkt_info.bytes += peer_stats->tx.ucast.bytes;
}
buf->tx_ucast = pkt_info;
}
break; break;
case cdp_peer_tx_mcast: case cdp_peer_tx_mcast:
buf->tx_mcast = peer_stats->tx.mcast; if (link_id > 0) {
peer_stats = &txrx_peer->stats[link_id].per_pkt_stats;
buf->tx_mcast = peer_stats->tx.mcast;
} else {
for (idx = 0; idx < stats_arr_size; idx++) {
peer_stats =
&txrx_peer->stats[idx].per_pkt_stats;
pkt_info.num += peer_stats->tx.mcast.num;
pkt_info.bytes += peer_stats->tx.mcast.bytes;
}
buf->tx_mcast = pkt_info;
}
break; break;
case cdp_peer_tx_inactive_time: case cdp_peer_tx_inactive_time:
tgt_peer = dp_get_tgt_peer_from_peer(peer); tgt_peer = dp_get_tgt_peer_from_peer(peer);
@@ -8276,7 +8312,20 @@ QDF_STATUS dp_txrx_get_peer_per_pkt_stats_param(struct dp_peer *peer,
ret = QDF_STATUS_E_FAILURE; ret = QDF_STATUS_E_FAILURE;
break; break;
case cdp_peer_rx_ucast: case cdp_peer_rx_ucast:
buf->rx_ucast = peer_stats->rx.unicast; if (link_id > 0) {
peer_stats = &txrx_peer->stats[link_id].per_pkt_stats;
buf->rx_ucast = peer_stats->rx.unicast;
} else {
for (idx = 0; idx < stats_arr_size; idx++) {
peer_stats =
&txrx_peer->stats[idx].per_pkt_stats;
pkt_info.num += peer_stats->rx.unicast.num;
pkt_info.bytes += peer_stats->rx.unicast.bytes;
}
buf->rx_ucast = pkt_info;
}
break; break;
default: default:
ret = QDF_STATUS_E_FAILURE; ret = QDF_STATUS_E_FAILURE;
@@ -8337,7 +8386,7 @@ QDF_STATUS dp_txrx_get_peer_extd_stats_param(struct dp_peer *peer,
if (!txrx_peer) if (!txrx_peer)
return QDF_STATUS_E_FAILURE; return QDF_STATUS_E_FAILURE;
peer_stats = &txrx_peer->stats.extd_stats; peer_stats = &txrx_peer->stats[0].extd_stats;
switch (type) { switch (type) {
case cdp_peer_tx_rate: case cdp_peer_tx_rate:
@@ -8463,6 +8512,8 @@ void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
struct dp_txrx_peer *txrx_peer; struct dp_txrx_peer *txrx_peer;
struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)arg; struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)arg;
struct dp_peer_per_pkt_stats *per_pkt_stats; struct dp_peer_per_pkt_stats *per_pkt_stats;
uint8_t link_id = 0;
struct dp_pdev *pdev = srcobj->vdev->pdev;
txrx_peer = dp_get_txrx_peer(srcobj); txrx_peer = dp_get_txrx_peer(srcobj);
if (qdf_unlikely(!txrx_peer)) if (qdf_unlikely(!txrx_peer))
@@ -8471,13 +8522,20 @@ void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
if (qdf_unlikely(dp_is_wds_extended(txrx_peer))) if (qdf_unlikely(dp_is_wds_extended(txrx_peer)))
return; return;
if (!dp_peer_is_primary_link_peer(srcobj)) if (dp_peer_is_primary_link_peer(srcobj)) {
goto link_stats; dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
}
dp_update_vdev_basic_stats(txrx_peer, vdev_stats); if (IS_MLO_DP_LINK_PEER(srcobj)) {
link_id = dp_get_peer_hw_link_id(soc, pdev);
per_pkt_stats = &txrx_peer->stats.per_pkt_stats; if (link_id > 0) {
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats); per_pkt_stats = &txrx_peer->
stats[link_id].per_pkt_stats;
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
}
}
link_stats: link_stats:
dp_monitor_peer_get_stats(soc, srcobj, vdev_stats, UPDATE_VDEV_STATS); dp_monitor_peer_get_stats(soc, srcobj, vdev_stats, UPDATE_VDEV_STATS);
@@ -8490,21 +8548,32 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
struct dp_txrx_peer *txrx_peer; struct dp_txrx_peer *txrx_peer;
struct dp_peer_per_pkt_stats *per_pkt_stats; struct dp_peer_per_pkt_stats *per_pkt_stats;
struct cdp_vdev_stats *vdev_stats = &vdev->stats; struct cdp_vdev_stats *vdev_stats = &vdev->stats;
uint8_t link_id = 0;
struct dp_pdev *pdev = vdev->pdev;
txrx_peer = peer->txrx_peer; txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer) if (!txrx_peer)
goto link_stats; goto link_stats;
dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
dp_peer_aggregate_tid_stats(peer); dp_peer_aggregate_tid_stats(peer);
per_pkt_stats = &txrx_peer->stats.per_pkt_stats; if (!IS_MLO_DP_LINK_PEER(peer)) {
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats); per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
}
if (IS_MLO_DP_LINK_PEER(peer)) {
link_id = dp_get_peer_hw_link_id(soc, pdev);
per_pkt_stats = &txrx_peer->
stats[link_id].per_pkt_stats;
DP_UPDATE_PER_PKT_STATS(vdev_stats,
per_pkt_stats);
}
link_stats: link_stats:
dp_monitor_peer_get_stats(soc, peer, vdev_stats, UPDATE_VDEV_STATS); dp_monitor_peer_get_stats(soc, peer, vdev_stats, UPDATE_VDEV_STATS);
} }
#else #else
void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj, void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
void *arg) void *arg)
@@ -8513,6 +8582,8 @@ void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)arg; struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)arg;
struct dp_peer_per_pkt_stats *per_pkt_stats; struct dp_peer_per_pkt_stats *per_pkt_stats;
struct dp_peer_extd_stats *extd_stats; struct dp_peer_extd_stats *extd_stats;
uint8_t inx = 0;
uint8_t stats_arr_size = 0;
txrx_peer = dp_get_txrx_peer(srcobj); txrx_peer = dp_get_txrx_peer(srcobj);
if (qdf_unlikely(!txrx_peer)) if (qdf_unlikely(!txrx_peer))
@@ -8524,13 +8595,15 @@ void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
if (!dp_peer_is_primary_link_peer(srcobj)) if (!dp_peer_is_primary_link_peer(srcobj))
return; return;
stats_arr_size = txrx_peer->stats_arr_size;
dp_update_vdev_basic_stats(txrx_peer, vdev_stats); dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
per_pkt_stats = &txrx_peer->stats.per_pkt_stats; for (inx = 0; inx < stats_arr_size; inx++) {
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats); per_pkt_stats = &txrx_peer->stats[inx].per_pkt_stats;
extd_stats = &txrx_peer->stats[inx].extd_stats;
extd_stats = &txrx_peer->stats.extd_stats; DP_UPDATE_EXTD_STATS(vdev_stats, extd_stats);
DP_UPDATE_EXTD_STATS(vdev_stats, extd_stats); DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
}
} }
void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev, void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
@@ -8540,18 +8613,22 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
struct dp_peer_per_pkt_stats *per_pkt_stats; struct dp_peer_per_pkt_stats *per_pkt_stats;
struct dp_peer_extd_stats *extd_stats; struct dp_peer_extd_stats *extd_stats;
struct cdp_vdev_stats *vdev_stats = &vdev->stats; struct cdp_vdev_stats *vdev_stats = &vdev->stats;
uint8_t inx = 0;
uint8_t stats_arr_size = 0;
txrx_peer = peer->txrx_peer; txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer) if (!txrx_peer)
return; return;
stats_arr_size = txrx_peer->stats_arr_size;
dp_update_vdev_basic_stats(txrx_peer, vdev_stats); dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
per_pkt_stats = &txrx_peer->stats.per_pkt_stats; for (inx = 0; inx < stats_arr_size; inx++) {
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats); per_pkt_stats = &txrx_peer->stats[inx].per_pkt_stats;
extd_stats = &txrx_peer->stats[inx].extd_stats;
extd_stats = &txrx_peer->stats.extd_stats; DP_UPDATE_EXTD_STATS(vdev_stats, extd_stats);
DP_UPDATE_EXTD_STATS(vdev_stats, extd_stats); DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
}
} }
#endif #endif