qcacmn: Add Support of TX MLO Link Stats for ML Peer

Add Support to update TX per packet path, MLO Link
statistics for ML Peer.

Change-Id: If8aa5433221ecbb7d84b3f6777784524f43179a3
CRs-Fixed: 3397721
This commit is contained in:
Amrit Sahai
2023-01-24 12:19:16 +05:30
committed by Madan Koyyalamudi
Commit 24dc3d385c
6 changed files with 187 additions and 68 deletions

View file

@@ -106,6 +106,9 @@
HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif
/*
 * DP_GET_HW_LINK_ID_FRM_PPDU_ID - extract the HW link-id field from a ppdu_id
 * @PPDU_ID: ppdu_id carried in the Tx completion status
 * @LINK_ID_OFFSET: bit offset of the link-id field within the ppdu_id
 * @LINK_ID_BITS: width, in bits, of the link-id field
 *
 * Shifts the ppdu_id right by the offset and masks off LINK_ID_BITS bits.
 * NOTE(review): assumes LINK_ID_BITS < 31 so (1 << LINK_ID_BITS) cannot
 * overflow a signed int -- confirm against soc->link_id_bits programming.
 */
#define DP_GET_HW_LINK_ID_FRM_PPDU_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
/*mapping between hal encrypt type and cdp_sec_type*/
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
HAL_TX_ENCRYPT_TYPE_WEP_128,
@@ -3159,6 +3162,7 @@ void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf_clone = NULL;
uint16_t peer_id = DP_INVALID_PEER;
struct dp_txrx_peer *txrx_peer;
uint8_t link_id = 0;
/* This check avoids pkt forwarding which is entered
* in the ast table but still doesn't have valid peerid.
@@ -3191,7 +3195,7 @@ void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
dp_tx_debug("multicast packet");
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
tx.nawds_mcast_drop,
1);
1, link_id);
continue;
}
@@ -3215,7 +3219,7 @@ void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
if (peer_id != DP_INVALID_PEER)
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
tx.nawds_mcast,
1, qdf_nbuf_len(nbuf));
1, qdf_nbuf_len(nbuf), link_id);
}
}
}
@@ -4463,19 +4467,21 @@ void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
#ifdef DISABLE_DP_STATS
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
struct dp_txrx_peer *txrx_peer)
struct dp_txrx_peer *txrx_peer,
uint8_t link_id)
{
}
#else
static inline void
dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
uint8_t link_id)
{
enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
if (subtype != QDF_PROTO_INVALID)
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
1);
1, link_id);
}
#endif
@@ -4499,12 +4505,13 @@ dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
*
* @ts: Tx compltion status
* @txrx_peer: datapath txrx_peer handle
* @link_id: Link id
*
* Return: void
*/
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
struct dp_txrx_peer *txrx_peer)
struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
uint8_t mcs, pkt_type, dst_mcs_idx;
uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
@@ -4519,36 +4526,73 @@ dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
DP_PEER_EXTD_STATS_INC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
1);
1, link_id);
DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
link_id);
DP_PEER_EXTD_STATS_INC(txrx_peer,
tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1,
link_id);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
link_id);
if (ts->first_msdu) {
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
ts->transmit_cnt > 1);
ts->transmit_cnt > 1, link_id);
if (!retry_threshold)
return;
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
qdf_do_div(ts->transmit_cnt,
retry_threshold),
ts->transmit_cnt > retry_threshold);
ts->transmit_cnt > retry_threshold,
link_id);
}
}
#else
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
struct dp_txrx_peer *txrx_peer)
struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
}
#endif
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_tx_get_link_id_from_ppdu_id() - decode the MLO link id from a Tx
 * completion ppdu_id
 * @soc: DP soc handle, supplies link_id_offset/link_id_bits for decoding
 * @ts: Tx completion status carrying the ppdu_id
 * @txrx_peer: datapath txrx peer handle
 * @vdev: DP vdev handle
 *
 * Return: (hw_link_id + 1) extracted from the ppdu_id when @txrx_peer is an
 * MLD peer and per-link peer stats are enabled on the pdev; 0 otherwise.
 * The +1 lets 0 serve as the "no/invalid link" index at the callers.
 */
static inline int
dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
struct hal_tx_completion_status *ts,
struct dp_txrx_peer *txrx_peer,
struct dp_vdev *vdev)
{
uint8_t hw_link_id = 0;
uint32_t ppdu_id;
uint8_t link_id_offset, link_id_bits;
/* Per-link stats apply only to MLD peers with link stats enabled */
if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
return 0;
link_id_offset = soc->link_id_offset;
link_id_bits = soc->link_id_bits;
ppdu_id = ts->ppdu_id;
/* Pull the HW link-id field embedded in the ppdu_id */
hw_link_id = DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, link_id_offset,
link_id_bits);
/* Shift to 1-based so callers can treat 0 as invalid/unknown link */
return (hw_link_id + 1);
}
#else
/* MLO disabled: always report link index 0 */
static inline int
dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
struct hal_tx_completion_status *ts,
struct dp_txrx_peer *txrx_peer,
struct dp_vdev *vdev)
{
return 0;
}
#endif
/**
* dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
* per wbm ring
@@ -4557,13 +4601,15 @@ dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
* @ts: Tx completion status
* @txrx_peer: peer handle
* @ring_id: ring number
* @link_id: Link id
*
* Return: None
*/
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
struct hal_tx_completion_status *ts,
struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
struct dp_txrx_peer *txrx_peer, uint8_t ring_id,
uint8_t link_id)
{
struct dp_pdev *pdev = txrx_peer->vdev->pdev;
uint8_t tid = ts->tid;
@@ -4580,7 +4626,8 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
dp_err_rl("Release source:%d is not from TQM", ts->release_src);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
link_id);
return;
}
@@ -4597,22 +4644,23 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
ts->transmit_cnt > 1);
ts->transmit_cnt > 1, link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
1, ts->transmit_cnt > 2);
1, ts->transmit_cnt > 2, link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
ts->msdu_part_of_amsdu);
ts->msdu_part_of_amsdu, link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
!ts->msdu_part_of_amsdu);
!ts->msdu_part_of_amsdu, link_id);
txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
qdf_system_ticks();
dp_tx_update_peer_extd_stats(ts, txrx_peer);
dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);
return;
}
@@ -4628,44 +4676,58 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
ts->transmit_cnt > DP_RETRY_COUNT);
dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
ts->transmit_cnt > DP_RETRY_COUNT,
link_id);
dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);
if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
length);
length, link_id);
} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
tx.dropped.fw_rem_queue_disable, 1);
tx.dropped.fw_rem_queue_disable, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
tx.dropped.fw_rem_no_match, 1);
tx.dropped.fw_rem_no_match, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
tx.dropped.drop_threshold, 1);
tx.dropped.drop_threshold, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
tx.dropped.drop_link_desc_na, 1);
tx.dropped.drop_link_desc_na, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
tx.dropped.invalid_drop, 1);
tx.dropped.invalid_drop, 1,
link_id);
} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
tx.dropped.mcast_vdev_drop, 1);
tx.dropped.mcast_vdev_drop, 1,
link_id);
} else {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
link_id);
}
}
@@ -4765,6 +4827,7 @@ static inline void dp_tx_notify_completion(struct dp_soc *soc,
* @tid: tid value
* @txdesc_ts: timestamp from txdesc
* @ppdu_id: ppdu id
* @link_id: link id
*
* Return: none
*/
@@ -4773,7 +4836,8 @@ static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
struct dp_txrx_peer *txrx_peer,
uint8_t tid,
uint64_t txdesc_ts,
uint32_t ppdu_id)
uint32_t ppdu_id,
uint8_t link_id)
{
uint64_t delta_ms;
struct cdp_tx_sojourn_stats *sojourn_stats;
@@ -4807,12 +4871,13 @@ static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
txdesc_ts;
qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].per_pkt_stats.tx.avg_sojourn_msdu[tid],
delta_ms);
sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
sojourn_stats->num_msdus[tid] = 1;
sojourn_stats->avg_sojourn_msdu[tid].internal =
txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
txrx_peer->stats[link_id].
per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
pdev->sojourn_buf, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id);
@@ -5173,6 +5238,7 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
struct dp_vdev *vdev = NULL;
qdf_nbuf_t nbuf = tx_desc->nbuf;
enum qdf_dp_tx_rx_status dp_status;
uint8_t link_id = 0;
if (!nbuf) {
dp_info_rl("invalid tx descriptor. nbuf NULL");
@@ -5231,6 +5297,11 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
}
vdev = txrx_peer->vdev;
#ifdef DP_MLO_LINK_STATS_SUPPORT
link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);
if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
link_id = 0;
#endif
dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
dp_tx_update_uplink_delay(soc, vdev, ts);
@@ -5249,30 +5320,32 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
vdev->opmode == wlan_op_mode_ap)) {
if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
length);
length, link_id);
if (txrx_peer->vdev->tx_encap_type ==
htt_cmn_pkt_type_ethernet &&
QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
tx.bcast, 1,
length);
length, link_id);
}
}
} else {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
link_id);
if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
1, length);
1, length, link_id);
if (qdf_unlikely(txrx_peer->in_twt)) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
tx.tx_success_twt,
1, length);
1, length,
link_id);
}
}
}
dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
@@ -5283,7 +5356,7 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
if (soc->peerstats_enabled)
dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
qdf_ktime_to_ms(tx_desc->timestamp),
ts->ppdu_id);
ts->ppdu_id, link_id);
#endif
out: