@@ -3690,8 +3690,8 @@ static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
 }
 
 /*
- * dp_tx_update_peer_ext_stats() - Update the peer extended stats
- * @peer: DP peer context
+ * dp_tx_update_peer_delay_stats() - Update the peer delay stats
+ * @txrx_peer: DP peer context
  * @tx_desc: Tx software descriptor
  * @tid: Transmission ID
  * @ring_id: Tx CPU context ID/CPU_ID
@@ -3701,21 +3701,21 @@ static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  *
  * Return: void
  */
-static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
-					struct dp_tx_desc_s *tx_desc,
-					uint8_t tid, uint8_t ring_id)
+static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
+					  struct dp_tx_desc_s *tx_desc,
+					  uint8_t tid, uint8_t ring_id)
 {
-	struct dp_pdev *pdev = peer->vdev->pdev;
+	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
 	struct dp_soc *soc = NULL;
-	struct cdp_peer_ext_stats *pext_stats = NULL;
+	struct dp_peer_delay_stats *delay_stats = NULL;
 
 	soc = pdev->soc;
 	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
		return;
 
-	pext_stats = peer->pext_stats;
+	delay_stats = txrx_peer->delay_stats;
 
-	qdf_assert(pext_stats);
+	qdf_assert(delay_stats);
 	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
 
 	/*
@@ -3724,13 +3724,13 @@ static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
 	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
 		tid = CDP_MAX_DATA_TIDS - 1;
 
-	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
+	dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
 				tx_desc);
 }
 #else
-static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
-					       struct dp_tx_desc_s *tx_desc,
-					       uint8_t tid, uint8_t ring_id)
+static inline void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
+						 struct dp_tx_desc_s *tx_desc,
+						 uint8_t tid, uint8_t ring_id)
 {
 }
 #endif
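
For orientation: the rename moves the per-peer delay bookkeeping onto dp_txrx_peer as a [tid][ring] matrix. A minimal sketch of the assumed layout follows; only dp_peer_delay_stats, delay_tid_stats, cdp_delay_tid_stats and the two CDP bounds are visible in this diff, and the array sizes shown are placeholders, not the real values:

	/* Sketch only; the real definitions live in the DP/CDP headers. */
	#define SKETCH_MAX_DATA_TIDS 9	/* stands in for CDP_MAX_DATA_TIDS */
	#define SKETCH_MAX_TXRX_CTX 4	/* stands in for CDP_MAX_TXRX_CTX */

	struct dp_peer_delay_stats {
		/* Indexed as delay_tid_stats[tid][ring_id], matching the
		 * dp_tx_compute_tid_delay() call above. */
		struct cdp_delay_tid_stats
			delay_tid_stats[SKETCH_MAX_DATA_TIDS][SKETCH_MAX_TXRX_CTX];
	};
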
@@ -3817,9 +3817,9 @@ dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
 static inline void
 dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
 			struct hal_tx_completion_status *ts,
-			struct dp_peer *peer, uint8_t ring_id)
+			struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
 {
-	struct dp_pdev *pdev = peer->vdev->pdev;
+	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
 	struct dp_soc *soc = NULL;
 	uint8_t mcs, pkt_type;
 	uint8_t tid = ts->tid;
@@ -3844,10 +3844,10 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
 	}
 
 	length = qdf_nbuf_len(tx_desc->nbuf);
-	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
+	DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
 
 	if (qdf_unlikely(pdev->delay_stats_flag))
-		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
-	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
-		      (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
+		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
+	DP_STATS_INCC(txrx_peer, tx.dropped.age_out, 1,
+		      (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
 
@@ -3868,7 +3868,6 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
 
-	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
-		      (ts->status == HAL_TX_TQM_RR_FW_REASON3));
-
+	DP_STATS_INCC(txrx_peer, tx.dropped.fw_reason3, 1,
+		      (ts->status == HAL_TX_TQM_RR_FW_REASON3));
 	/*
 	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
 	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
@@ -3876,14 +3875,14 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
 	 * data path. Please note that if tx_failed is fixed to be from ppdu,
 	 * then this has to be removed
 	 */
-	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
-		peer->stats.tx.dropped.fw_rem_notx +
-		peer->stats.tx.dropped.fw_rem_tx +
-		peer->stats.tx.dropped.age_out +
-		peer->stats.tx.dropped.fw_reason1 +
-		peer->stats.tx.dropped.fw_reason2 +
-		peer->stats.tx.dropped.fw_reason3;
-
+	txrx_peer->tx_failed =
+		txrx_peer->stats.tx.dropped.fw_rem.num +
+		txrx_peer->stats.tx.dropped.fw_rem_notx +
+		txrx_peer->stats.tx.dropped.fw_rem_tx +
+		txrx_peer->stats.tx.dropped.age_out +
+		txrx_peer->stats.tx.dropped.fw_reason1 +
+		txrx_peer->stats.tx.dropped.fw_reason2 +
+		txrx_peer->stats.tx.dropped.fw_reason3;
 	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
 		tid_stats->tqm_status_cnt[ts->status]++;
 	}
@@ -3894,6 +3893,7 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
-		dp_update_no_ack_stats(tx_desc->nbuf, peer);
+		dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
 		return;
 	}
+
-	DP_STATS_INCC(peer, tx.retry_count, 1, ts->transmit_cnt > 1);
+	DP_STATS_INCC(txrx_peer, tx.retry_count, 1, ts->transmit_cnt > 1);
 
-	DP_STATS_INCC(peer, tx.multiple_retry_count, 1, ts->transmit_cnt > 2);
+	DP_STATS_INCC(txrx_peer, tx.multiple_retry_count, 1, ts->transmit_cnt > 2);
@@ -3902,14 +3902,12 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
 
-	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
-	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
-
+	DP_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
+	DP_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
 	/*
 	 * Following Rate Statistics are updated from HTT PPDU events from FW.
 	 * Return from here if HTT PPDU events are enabled.
 	 */
 	if (!(soc->process_tx_status))
 		return;
-
-	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
-		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
-	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
+	DP_STATS_INCC(txrx_peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
+		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
+	DP_STATS_INCC(txrx_peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
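
The comp_pkt change above also drops the "tx." prefix: the counter moves from the nested peer->stats block to a flat member on dp_txrx_peer. A rough model of the two macro families, assuming conventional do/while wrappers; the real macros add feature guards and are not reproduced here:

	/* Assumed shape of the old, nested accessor. */
	#define DP_STATS_INC_PKT(_peer, _field, _num, _bytes)		\
		do {							\
			(_peer)->stats._field.num += (_num);		\
			(_peer)->stats._field.bytes += (_bytes);	\
		} while (0)

	/* Assumed shape of the new, flat accessor: the field sits directly
	 * on dp_txrx_peer, hence "comp_pkt" rather than "tx.comp_pkt". */
	#define DP_STATS_FLAT_INC_PKT(_txrx_peer, _field, _num, _bytes)	\
		do {							\
			(_txrx_peer)->_field.num += (_num);		\
			(_txrx_peer)->_field.bytes += (_bytes);		\
		} while (0)
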
@@ -4050,7 +4048,7 @@ static inline void dp_tx_notify_completion(struct dp_soc *soc,
  */
 #ifdef FEATURE_PERPKT_INFO
 static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
-					       struct dp_peer *peer,
+					       struct dp_txrx_peer *txrx_peer,
 					       uint8_t tid,
 					       uint64_t txdesc_ts,
 					       uint32_t ppdu_id)
@@ -4467,12 +4465,12 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
 		}
 	}
 
-	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
-	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
+	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
+	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts->tid, ring_id);
 
 #ifdef QCA_SUPPORT_RDK_STATS
 	if (soc->rdkstats_enabled)
-		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
+		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
 					    tx_desc->timestamp,
 					    ts->ppdu_id);
 #endif
@@ -4492,33 +4490,36 @@ out:
  *
  * Return: none
  */
-void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
-				   uint8_t tx_status, bool update)
-{
-	if ((!peer->hw_txrx_stats_en) || update) {
-		DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
-		DP_STATS_INCC(peer, tx.tx_failed, 1,
-			      tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
+void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
+				   uint32_t length, uint8_t tx_status,
+				   bool update)
+{
+	if ((!txrx_peer->hw_txrx_stats_en) || update) {
+		DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
+		DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
+				  tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
 	}
 }
 #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
-void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
-				   uint8_t tx_status, bool update)
+void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
+				   uint32_t length, uint8_t tx_status,
+				   bool update)
 {
-	if (!peer->hw_txrx_stats_en) {
-		DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
-		DP_STATS_INCC(peer, tx.tx_failed, 1,
-			      tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
+	if (!txrx_peer->hw_txrx_stats_en) {
+		DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
+		DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
+				  tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
 	}
 }
 
 #else
-void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
-				   uint8_t tx_status, bool update)
+void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
+				   uint32_t length, uint8_t tx_status,
+				   bool update)
 {
-	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
-	DP_STATS_INCC(peer, tx.tx_failed, 1,
-		      tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
+	DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
+	DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
+			  tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
 }
 #endif
 
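All three variants keep the same signature, so call sites are unconditional; the update flag only matters in the first (HW-offload) build, where it forces a software update even for a peer whose counters are HW-offloaded (hw_txrx_stats_en set). A hypothetical call site:

	/* Hypothetical caller: account one completed frame against the
	 * peer, forcing the SW counters despite HW stats offload. */
	dp_tx_update_peer_basic_stats(txrx_peer, qdf_nbuf_len(tx_desc->nbuf),
				      ts->status, true);
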
@@ -4594,8 +4595,8 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
 		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
 			struct dp_pdev *pdev = desc->pdev;
 
-			if (qdf_likely(peer))
-				dp_tx_update_peer_basic_stats(peer,
+			if (qdf_likely(txrx_peer))
+				dp_tx_update_peer_basic_stats(txrx_peer,
 							      desc->length,
 							      desc->tx_status,
 							      false);