qcacmn: Move per packet stats params to txrx_peer

Move the stats parameters from the dp_peer which are used
in per packet path to txrx_peer

Change-Id: Ieb68b6950740791b37bbd2bfdc4815c3d7bc4834
CRs-Fixed: 3095637
This commit is contained in:
Pavankumar Nandeshwar
2021-12-13 02:44:55 -08:00
committed by Madan Koyyalamudi
parent 98b25a2ee6
commit b9038e9d4e
16 changed files with 202 additions and 163 deletions

View File

@@ -1062,17 +1062,6 @@ struct cdp_delay_tid_stats {
struct cdp_delay_rx_stats rx_delay; struct cdp_delay_rx_stats rx_delay;
}; };
/*
* cdp_peer_ext_stats: Peer extended stats
* @delay_stats: Per TID delay stats
*/
struct cdp_peer_ext_stats {
struct cdp_delay_tid_stats delay_stats[CDP_MAX_DATA_TIDS]
[CDP_MAX_TXRX_CTX];
/*Customer can add MSDU level Tx/Rx stats */
};
/* struct cdp_pkt_info - packet info /* struct cdp_pkt_info - packet info
* @num: no of packets * @num: no of packets
* @bytes: total no of bytes * @bytes: total no of bytes

View File

@@ -729,7 +729,7 @@ done:
/* Update the flow tag in SKB based on FSE metadata */ /* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
reo_ring_num, tid_stats); reo_ring_num, tid_stats);
if (qdf_unlikely(vdev->mesh_vdev)) { if (qdf_unlikely(vdev->mesh_vdev)) {
@@ -744,7 +744,8 @@ done:
nbuf = next; nbuf = next;
continue; continue;
} }
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
txrx_peer);
} }
if (qdf_likely(vdev->rx_decap_type == if (qdf_likely(vdev->rx_decap_type ==
@@ -774,7 +775,9 @@ done:
DP_RX_LIST_APPEND(deliver_list_head, DP_RX_LIST_APPEND(deliver_list_head,
deliver_list_tail, deliver_list_tail,
nbuf); nbuf);
DP_PEER_TO_STACK_INCC_PKT(peer, 1, QDF_NBUF_CB_RX_PKT_LEN(nbuf),
DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf),
enh_flag); enh_flag);
if (qdf_unlikely(txrx_peer->in_twt)) if (qdf_unlikely(txrx_peer->in_twt))
DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,

View File

@@ -280,9 +280,9 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id, txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
&txrx_ref_handle, &txrx_ref_handle,
DP_MOD_ID_HTT_COMP); DP_MOD_ID_HTT_COMP);
if (qdf_likely(peer)) if (qdf_likely(txrx_peer))
dp_tx_update_peer_basic_stats( dp_tx_update_peer_basic_stats(
peer, txrx_peer,
qdf_nbuf_len(tx_desc->nbuf), qdf_nbuf_len(tx_desc->nbuf),
tx_status, tx_status,
pdev->enhanced_stats_en); pdev->enhanced_stats_en);

View File

@@ -743,6 +743,12 @@ void DP_PRINT_STATS(const char *fmt, ...);
_handle->stats._field += _delta; \ _handle->stats._field += _delta; \
} }
#define DP_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
if (likely(_handle)) \
_handle->_field += _delta; \
}
#define DP_STATS_INCC(_handle, _field, _delta, _cond) \ #define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \ { \
if (_cond && likely(_handle)) \ if (_cond && likely(_handle)) \
@@ -755,6 +761,12 @@ void DP_PRINT_STATS(const char *fmt, ...);
_handle->stats._field -= _delta; \ _handle->stats._field -= _delta; \
} }
#define DP_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
if (likely(_handle)) \
_handle->_field -= _delta; \
}
#define DP_STATS_UPD(_handle, _field, _delta) \ #define DP_STATS_UPD(_handle, _field, _delta) \
{ \ { \
if (likely(_handle)) \ if (likely(_handle)) \
@@ -767,6 +779,12 @@ void DP_PRINT_STATS(const char *fmt, ...);
DP_STATS_INC(_handle, _field.bytes, _bytes) \ DP_STATS_INC(_handle, _field.bytes, _bytes) \
} }
#define DP_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \
DP_STATS_FLAT_INC(_handle, _field.num, _count); \
DP_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
}
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \ { \
DP_STATS_INCC(_handle, _field.num, _count, _cond); \ DP_STATS_INCC(_handle, _field.num, _count, _cond); \
@@ -791,10 +809,13 @@ void DP_PRINT_STATS(const char *fmt, ...);
#else #else
#define DP_STATS_INC(_handle, _field, _delta) #define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond) #define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta) #define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta) #define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field) #define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
@@ -805,62 +826,62 @@ void DP_PRINT_STATS(const char *fmt, ...);
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_INC_PKT(_handle, rx.to_stack, _count, _bytes); \ DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
} }
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_DEC(_handle, rx.to_stack.num, _count); \ DP_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
} }
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \ DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes); \
} }
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \ DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes); \
} }
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_INC_PKT(_handle, rx.to_stack, _count, _bytes); \ DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
} }
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_DEC(_handle, rx.to_stack.num, _count); \ DP_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
} }
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \ DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes); \
} }
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \ DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes); \
} }
#else #else
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_INC_PKT(_handle, rx.to_stack, _count, _bytes); DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes);
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
DP_STATS_DEC(_handle, rx.to_stack.num, _count); DP_STATS_FLAT_DEC(_handle, to_stack.num, _count);
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes);
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes);
#endif #endif
#ifdef ENABLE_DP_HIST_STATS #ifdef ENABLE_DP_HIST_STATS

View File

@@ -6823,6 +6823,10 @@ static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
peer->txrx_peer = NULL; peer->txrx_peer = NULL;
dp_peer_defrag_rx_tids_deinit(txrx_peer); dp_peer_defrag_rx_tids_deinit(txrx_peer);
/*
* Deallocate the extended stats contenxt
*/
dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
dp_peer_rx_bufq_resources_deinit(txrx_peer); dp_peer_rx_bufq_resources_deinit(txrx_peer);
qdf_mem_free(txrx_peer); qdf_mem_free(txrx_peer);
@@ -6844,8 +6848,20 @@ static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
/* initialize the peer_id */ /* initialize the peer_id */
txrx_peer->vdev = peer->vdev; txrx_peer->vdev = peer->vdev;
dp_wds_ext_peer_init(peer); dp_wds_ext_peer_init(txrx_peer);
dp_peer_rx_bufq_resources_init(txrx_peer); dp_peer_rx_bufq_resources_init(txrx_peer);
dp_peer_hw_txrx_stats_init(soc, txrx_peer);
/*
* Allocate peer extended stats context. Fall through in
* case of failure as its not an implicit requirement to have
* this object for regular statistics updates.
*/
if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
QDF_STATUS_SUCCESS)
dp_warn("peer ext_stats ctx alloc failed");
dp_set_peer_isolation(txrx_peer, false);
dp_peer_defrag_rx_tids_init(txrx_peer); dp_peer_defrag_rx_tids_init(txrx_peer);
dp_txrx_peer_attach_add(soc, peer, txrx_peer); dp_txrx_peer_attach_add(soc, peer, txrx_peer);
@@ -8818,16 +8834,17 @@ void dp_print_napi_stats(struct dp_soc *soc)
#ifdef QCA_PEER_EXT_STATS #ifdef QCA_PEER_EXT_STATS
/** /**
* dp_txrx_host_peer_ext_stats_clr: Reinitialize the txrx peer ext stats * dp_txrx_host_peer_delay_stats_clr: Reinitialize the txrx peer delay stats
* *
*/ */
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer) static inline void dp_txrx_host_peer_delay_stats_clr(struct dp_peer *peer)
{ {
if (peer->pext_stats) if (peer->txrx_peer->delay_stats)
qdf_mem_zero(peer->pext_stats, sizeof(*peer->pext_stats)); qdf_mem_zero(peer->txrx_peer->delay_stats,
sizeof(struct dp_peer_delay_stats));
} }
#else #else
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer) static inline void dp_txrx_host_peer_delay_stats_clr(struct dp_peer *peer)
{ {
} }
#endif #endif
@@ -8855,7 +8872,7 @@ dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
DP_STATS_CLR(peer); DP_STATS_CLR(peer);
dp_txrx_host_peer_ext_stats_clr(peer); dp_txrx_host_peer_delay_stats_clr(peer);
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc, dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
@@ -9465,7 +9482,7 @@ static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
struct dp_peer *peer = NULL; struct dp_peer *peer = NULL;
uint16_t peer_id, ring_id; uint16_t peer_id, ring_id;
uint8_t tid = qdf_nbuf_get_tid_val(nbuf); uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
struct cdp_peer_ext_stats *pext_stats = NULL; struct dp_peer_delay_stats *delay_stats = NULL;
peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
if (peer_id > soc->max_peer_id) if (peer_id > soc->max_peer_id)
@@ -9475,10 +9492,10 @@ static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
if (qdf_unlikely(!peer)) if (qdf_unlikely(!peer))
return; return;
if (qdf_likely(peer->pext_stats)) { if (qdf_likely(peer->txrx_peer->delay_stats)) {
pext_stats = peer->pext_stats; delay_stats = peer->txrx_peer->delay_stats;
ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id], dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
nbuf); nbuf);
} }
dp_peer_unref_delete(peer, DP_MOD_ID_CDP); dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

View File

@@ -4637,23 +4637,23 @@ dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
#ifdef QCA_PEER_EXT_STATS #ifdef QCA_PEER_EXT_STATS
/* /*
* dp_peer_ext_stats_ctx_alloc() - Allocate peer ext * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay
* stats content * stats content
* @soc: DP SoC context * @soc: DP SoC context
* @peer: DP peer context * @txrx_peer: DP txrx peer context
* *
* Allocate the peer extended stats context * Allocate the peer delay stats context
* *
* Return: QDF_STATUS_SUCCESS if allocation is * Return: QDF_STATUS_SUCCESS if allocation is
* successful * successful
*/ */
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc, QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
struct dp_peer *peer) struct dp_txrx_peer *txrx_peer)
{ {
uint8_t tid, ctx_id; uint8_t tid, ctx_id;
if (!soc || !peer) { if (!soc || !txrx_peer) {
dp_warn("Null soc%pK or peer%pK", soc, peer); dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
return QDF_STATUS_E_INVAL; return QDF_STATUS_E_INVAL;
} }
@@ -4663,8 +4663,9 @@ QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
/* /*
* Allocate memory for peer extended stats. * Allocate memory for peer extended stats.
*/ */
peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats)); txrx_peer->delay_stats =
if (!peer->pext_stats) { qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
if (!txrx_peer->delay_stats) {
dp_err("Peer extended stats obj alloc failed!!"); dp_err("Peer extended stats obj alloc failed!!");
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
@@ -4672,9 +4673,9 @@ QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) { for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) { for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
struct cdp_delay_tx_stats *tx_delay = struct cdp_delay_tx_stats *tx_delay =
&peer->pext_stats->delay_stats[tid][ctx_id].tx_delay; &txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
struct cdp_delay_rx_stats *rx_delay = struct cdp_delay_rx_stats *rx_delay =
&peer->pext_stats->delay_stats[tid][ctx_id].rx_delay; &txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
dp_hist_init(&tx_delay->tx_swq_delay, dp_hist_init(&tx_delay->tx_swq_delay,
CDP_HIST_TYPE_SW_ENQEUE_DELAY); CDP_HIST_TYPE_SW_ENQEUE_DELAY);
@@ -4689,16 +4690,17 @@ QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
} }
/* /*
* dp_peer_ext_stats_ctx_dealloc() - Dealloc the peer context * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
* @peer: DP peer context * @txrx_peer: txrx DP peer context
* *
* Free the peer extended stats context * Free the peer delay stats context
* *
* Return: Void * Return: Void
*/ */
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer) void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{ {
if (!peer) { if (!txrx_peer) {
dp_warn("peer_ext dealloc failed due to NULL peer object"); dp_warn("peer_ext dealloc failed due to NULL peer object");
return; return;
} }
@@ -4706,11 +4708,11 @@ void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)) if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
return; return;
if (!peer->pext_stats) if (!txrx_peer->delay_stats)
return; return;
qdf_mem_free(peer->pext_stats); qdf_mem_free(txrx_peer->delay_stats);
peer->pext_stats = NULL; txrx_peer->delay_stats = NULL;
} }
#endif #endif

View File

@@ -886,19 +886,21 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc,
union hal_reo_status *reo_status); union hal_reo_status *reo_status);
#ifdef QCA_PEER_EXT_STATS #ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc, QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
struct dp_peer *peer); struct dp_txrx_peer *txrx_peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_peer *peer); struct dp_txrx_peer *txrx_peer);
#else #else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc, static inline
struct dp_peer *peer) QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{ {
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }
static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, static inline
struct dp_peer *peer) void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{ {
} }
#endif #endif

View File

@@ -774,7 +774,7 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf)); DP_STATS_INC_PKT(txrx_peer, rx.raw, 1, qdf_nbuf_len(nbuf));
/* /*
* reset the chfrag_start and chfrag_end bits in nbuf cb * reset the chfrag_start and chfrag_end bits in nbuf cb
* as this is a non-amsdu pkt and RAW mode simulation expects * as this is a non-amsdu pkt and RAW mode simulation expects
@@ -1077,7 +1077,7 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
#else #else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer) uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{ {
} }
@@ -1917,7 +1917,7 @@ dp_rx_validate_rx_callbacks(struct dp_soc *soc,
} else { } else {
num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
nbuf_head); nbuf_head);
DP_PEER_TO_STACK_DECC(peer, num_nbuf, DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
vdev->pdev->enhanced_stats_en); vdev->pdev->enhanced_stats_en);
} }
return QDF_STATUS_E_FAILURE; return QDF_STATUS_E_FAILURE;
@@ -1994,7 +1994,7 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
* @soc: core txrx main context * @soc: core txrx main context
* @nbuf: pointer to the first msdu of an amsdu. * @nbuf: pointer to the first msdu of an amsdu.
* @rx_tlv_hdr: pointer to the start of RX TLV headers. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
* @peer: pointer to the peer object. * @txrx_peer: pointer to the txrx peer object.
* @ring_id: reo dest ring number on which pkt is reaped. * @ring_id: reo dest ring number on which pkt is reaped.
* @tid_stats: per tid rx stats. * @tid_stats: per tid rx stats.
* *
@@ -2002,26 +2002,25 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
* Return: void * Return: void
*/ */
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer, uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer,
uint8_t ring_id, uint8_t ring_id,
struct cdp_tid_rx_stats *tid_stats) struct cdp_tid_rx_stats *tid_stats)
{ {
bool is_ampdu, is_not_amsdu; bool is_ampdu, is_not_amsdu;
uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
struct dp_vdev *vdev = peer->vdev; struct dp_vdev *vdev = txrx_peer->vdev;
bool enh_flag; bool enh_flag;
qdf_ether_header_t *eh; qdf_ether_header_t *eh;
uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer); dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
qdf_nbuf_is_rx_chfrag_end(nbuf); qdf_nbuf_is_rx_chfrag_end(nbuf);
DP_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, msdu_len);
DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len); DP_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu); DP_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf)); DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
tid_stats->msdu_cnt++; tid_stats->msdu_cnt++;
if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
@@ -2051,7 +2050,6 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr); is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu); DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu)); DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr); sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr); mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
tid = qdf_nbuf_get_tid_val(nbuf); tid = qdf_nbuf_get_tid_val(nbuf);
@@ -2060,7 +2058,6 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
rx_tlv_hdr); rx_tlv_hdr);
nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr); nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr); pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1, DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf))); ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1, DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
@@ -2074,7 +2071,6 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
pkt_type == DOT11_AC || pkt_type == DOT11_AC ||
pkt_type == DOT11_AX)) pkt_type == DOT11_AX))
DP_STATS_INC(peer, rx.nss[nss - 1], 1); DP_STATS_INC(peer, rx.nss[nss - 1], 1);
DP_STATS_INC(peer, rx.sgi_count[sgi], 1); DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
DP_STATS_INCC(peer, rx.err.mic_err, 1, DP_STATS_INCC(peer, rx.err.mic_err, 1,
hal_rx_tlv_mic_err_get(soc->hal_soc, rx_tlv_hdr)); hal_rx_tlv_mic_err_get(soc->hal_soc, rx_tlv_hdr));

View File

@@ -1491,7 +1491,7 @@ dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
uint32_t mac_id, uint32_t quota); uint32_t mac_id, uint32_t quota);
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer); uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr); uint8_t *rx_tlv_hdr);
@@ -1899,7 +1899,7 @@ void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
#endif #endif
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer, uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
uint8_t ring_id, uint8_t ring_id,
struct cdp_tid_rx_stats *tid_stats); struct cdp_tid_rx_stats *tid_stats);

View File

@@ -1065,7 +1065,8 @@ dp_2k_jump_handle(struct dp_soc *soc,
IEEE80211_REASON_QOS_SETUP_REQUIRED; IEEE80211_REASON_QOS_SETUP_REQUIRED;
qdf_spin_unlock_bh(&rx_tid->tid_lock); qdf_spin_unlock_bh(&rx_tid->tid_lock);
if (soc->cdp_soc.ol_ops->send_delba) { if (soc->cdp_soc.ol_ops->send_delba) {
DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1); DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
1);
soc->cdp_soc.ol_ops->send_delba( soc->cdp_soc.ol_ops->send_delba(
peer->vdev->pdev->soc->ctrl_psoc, peer->vdev->pdev->soc->ctrl_psoc,
peer->vdev->vdev_id, peer->vdev->vdev_id,
@@ -1403,8 +1404,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
if ((!soc->mec_fw_offload) && if ((!soc->mec_fw_offload) &&
dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) { dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
/* this is a looped back MCBC pkt, drop it */ /* this is a looped back MCBC pkt, drop it */
DP_STATS_INC_PKT(peer, rx.mec_drop, 1, DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
qdf_nbuf_len(nbuf));
goto drop_nbuf; goto drop_nbuf;
} }
@@ -1488,7 +1488,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
} else { } else {
enh_flag = vdev->pdev->enhanced_stats_en; enh_flag = vdev->pdev->enhanced_stats_en;
qdf_nbuf_set_next(nbuf, NULL); qdf_nbuf_set_next(nbuf, NULL);
DP_PEER_TO_STACK_INCC_PKT(peer, 1, qdf_nbuf_len(nbuf), DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag); enh_flag);
/* /*
* Update the protocol tag in SKB based on * Update the protocol tag in SKB based on
@@ -1506,7 +1506,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
soc->hal_soc, rx_tlv_hdr) && soc->hal_soc, rx_tlv_hdr) &&
(vdev->rx_decap_type == (vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet))) { htt_cmn_pkt_type_ethernet))) {
DP_PEER_MC_INCC_PKT(peer, 1, qdf_nbuf_len(nbuf), DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag); enh_flag);
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
@@ -1659,7 +1659,7 @@ process_mesh:
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
return; return;
} }
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
} }
process_rx: process_rx:
if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
@@ -1684,7 +1684,7 @@ process_rx:
EXCEPTION_DEST_RING_ID, true, true); EXCEPTION_DEST_RING_ID, true, true);
/* Update the flow tag in SKB based on FSE metadata */ /* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
DP_STATS_INC(peer, rx.to_stack.num, 1); DP_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
qdf_nbuf_set_exc_frame(nbuf, 1); qdf_nbuf_set_exc_frame(nbuf, 1);
dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL); dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
} }
@@ -1906,7 +1906,8 @@ dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
/* Update the flow tag in SKB based on FSE metadata */ /* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
true); true);
DP_PEER_TO_STACK_INCC_PKT(peer, 1, qdf_nbuf_len(nbuf), DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
qdf_nbuf_len(nbuf),
vdev->pdev->enhanced_stats_en); vdev->pdev->enhanced_stats_en);
qdf_nbuf_set_exc_frame(nbuf, 1); qdf_nbuf_set_exc_frame(nbuf, 1);
qdf_nbuf_set_next(nbuf, NULL); qdf_nbuf_set_next(nbuf, NULL);
@@ -2809,8 +2810,8 @@ done:
case HAL_REO_ERR_PN_CHECK_FAILED: case HAL_REO_ERR_PN_CHECK_FAILED:
case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
if (peer) if (txrx_peer)
DP_STATS_INC(peer, DP_STATS_INC(txrx_peer,
rx.err.pn_err, 1); rx.err.pn_err, 1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
break; break;

View File

@@ -5769,7 +5769,7 @@ static void dp_accumulate_delay_tid_stats(struct dp_soc *soc,
void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev, void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev,
struct dp_peer *peer) struct dp_peer *peer)
{ {
struct cdp_peer_ext_stats *pext_stats; struct dp_peer_delay_stats *delay_stats;
struct dp_soc *soc = NULL; struct dp_soc *soc = NULL;
struct cdp_hist_stats hist_stats; struct cdp_hist_stats hist_stats;
uint8_t tid; uint8_t tid;
@@ -5781,22 +5781,22 @@ void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev,
if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)) if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
return; return;
pext_stats = peer->pext_stats; delay_stats = peer->txrx_peer->delay_stats;
if (!pext_stats) if (!delay_stats)
return; return;
for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) { for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
DP_PRINT_STATS("----TID: %d----", tid); DP_PRINT_STATS("----TID: %d----", tid);
DP_PRINT_STATS("Software Enqueue Delay:"); DP_PRINT_STATS("Software Enqueue Delay:");
qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats))); qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats)));
dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats, dp_accumulate_delay_tid_stats(soc, delay_stats->delay_tid_stats,
&hist_stats, tid, &hist_stats, tid,
CDP_HIST_TYPE_SW_ENQEUE_DELAY); CDP_HIST_TYPE_SW_ENQEUE_DELAY);
dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_SW_ENQEUE_DELAY); dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_SW_ENQEUE_DELAY);
qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats))); qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats)));
DP_PRINT_STATS("Hardware Transmission Delay:"); DP_PRINT_STATS("Hardware Transmission Delay:");
dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats, dp_accumulate_delay_tid_stats(soc, delay_stats->delay_tid_stats,
&hist_stats, tid, &hist_stats, tid,
CDP_HIST_TYPE_HW_COMP_DELAY); CDP_HIST_TYPE_HW_COMP_DELAY);
dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_HW_COMP_DELAY); dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_HW_COMP_DELAY);
@@ -5813,7 +5813,7 @@ void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev,
void dp_peer_print_rx_delay_stats(struct dp_pdev *pdev, void dp_peer_print_rx_delay_stats(struct dp_pdev *pdev,
struct dp_peer *peer) struct dp_peer *peer)
{ {
struct cdp_peer_ext_stats *pext_stats; struct dp_peer_delay_stats *delay_stats;
struct dp_soc *soc = NULL; struct dp_soc *soc = NULL;
struct cdp_hist_stats hist_stats; struct cdp_hist_stats hist_stats;
uint8_t tid; uint8_t tid;
@@ -5825,15 +5825,15 @@ void dp_peer_print_rx_delay_stats(struct dp_pdev *pdev,
if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)) if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
return; return;
pext_stats = peer->pext_stats; delay_stats = peer->txrx_peer->delay_stats;
if (!pext_stats) if (!delay_stats)
return; return;
for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) { for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
DP_PRINT_STATS("----TID: %d----", tid); DP_PRINT_STATS("----TID: %d----", tid);
DP_PRINT_STATS("Rx Reap2stack Deliver Delay:"); DP_PRINT_STATS("Rx Reap2stack Deliver Delay:");
qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats))); qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats)));
dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats, dp_accumulate_delay_tid_stats(soc, delay_stats->delay_tid_stats,
&hist_stats, tid, &hist_stats, tid,
CDP_HIST_TYPE_REAP_STACK); CDP_HIST_TYPE_REAP_STACK);
dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_REAP_STACK); dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_REAP_STACK);
@@ -5963,9 +5963,9 @@ void dp_print_peer_stats(struct dp_peer *peer)
DP_PRINT_STATS("Node Tx Stats:\n"); DP_PRINT_STATS("Node Tx Stats:\n");
DP_PRINT_STATS("Total Packet Completions = %d", DP_PRINT_STATS("Total Packet Completions = %d",
peer->stats.tx.comp_pkt.num); peer->txrx_peer->comp_pkt.num);
DP_PRINT_STATS("Total Bytes Completions = %llu", DP_PRINT_STATS("Total Bytes Completions = %llu",
peer->stats.tx.comp_pkt.bytes); peer->txrx_peer->comp_pkt.bytes);
DP_PRINT_STATS("Success Packets = %d", DP_PRINT_STATS("Success Packets = %d",
peer->stats.tx.tx_success.num); peer->stats.tx.tx_success.num);
DP_PRINT_STATS("Success Bytes = %llu", DP_PRINT_STATS("Success Bytes = %llu",
@@ -6088,9 +6088,9 @@ void dp_print_peer_stats(struct dp_peer *peer)
DP_PRINT_STATS("Node Rx Stats:"); DP_PRINT_STATS("Node Rx Stats:");
DP_PRINT_STATS("Packets Sent To Stack = %d", DP_PRINT_STATS("Packets Sent To Stack = %d",
peer->stats.rx.to_stack.num); peer->txrx_peer->to_stack.num);
DP_PRINT_STATS("Bytes Sent To Stack = %llu", DP_PRINT_STATS("Bytes Sent To Stack = %llu",
peer->stats.rx.to_stack.bytes); peer->txrx_peer->to_stack.bytes);
for (i = 0; i < CDP_MAX_RX_RINGS; i++) { for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
DP_PRINT_STATS("Ring Id = %d", i); DP_PRINT_STATS("Ring Id = %d", i);
DP_PRINT_STATS(" Packets Received = %d", DP_PRINT_STATS(" Packets Received = %d",

View File

@@ -3690,8 +3690,8 @@ static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
} }
/* /*
* dp_tx_update_peer_ext_stats() - Update the peer extended stats * dp_tx_update_peer_delay_stats() - Update the peer delay stats
* @peer: DP peer context * @txrx_peer: DP peer context
* @tx_desc: Tx software descriptor * @tx_desc: Tx software descriptor
* @tid: Transmission ID * @tid: Transmission ID
* @ring_id: Rx CPU context ID/CPU_ID * @ring_id: Rx CPU context ID/CPU_ID
@@ -3701,21 +3701,21 @@ static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
* *
* Return: void * Return: void
*/ */
static void dp_tx_update_peer_ext_stats(struct dp_peer *peer, static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
struct dp_tx_desc_s *tx_desc, struct dp_tx_desc_s *tx_desc,
uint8_t tid, uint8_t ring_id) uint8_t tid, uint8_t ring_id)
{ {
struct dp_pdev *pdev = peer->vdev->pdev; struct dp_pdev *pdev = txrx_peer->vdev->pdev;
struct dp_soc *soc = NULL; struct dp_soc *soc = NULL;
struct cdp_peer_ext_stats *pext_stats = NULL; struct dp_peer_delay_stats *delay_stats = NULL;
soc = pdev->soc; soc = pdev->soc;
if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))) if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
return; return;
pext_stats = peer->pext_stats; delay_stats = txrx_peer->delay_stats;
qdf_assert(pext_stats); qdf_assert(delay_stats);
qdf_assert(ring < CDP_MAX_TXRX_CTX); qdf_assert(ring < CDP_MAX_TXRX_CTX);
/* /*
@@ -3724,13 +3724,13 @@ static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
tid = CDP_MAX_DATA_TIDS - 1; tid = CDP_MAX_DATA_TIDS - 1;
dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id], dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
tx_desc); tx_desc);
} }
#else #else
static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer, static inline void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
struct dp_tx_desc_s *tx_desc, struct dp_tx_desc_s *tx_desc,
uint8_t tid, uint8_t ring_id) uint8_t tid, uint8_t ring_id)
{ {
} }
#endif #endif
@@ -3817,9 +3817,9 @@ dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
static inline void static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc, dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
struct hal_tx_completion_status *ts, struct hal_tx_completion_status *ts,
struct dp_peer *peer, uint8_t ring_id) struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
{ {
struct dp_pdev *pdev = peer->vdev->pdev; struct dp_pdev *pdev = txrx_peer->vdev->pdev;
struct dp_soc *soc = NULL; struct dp_soc *soc = NULL;
uint8_t mcs, pkt_type; uint8_t mcs, pkt_type;
uint8_t tid = ts->tid; uint8_t tid = ts->tid;
@@ -3844,10 +3844,10 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
} }
length = qdf_nbuf_len(tx_desc->nbuf); length = qdf_nbuf_len(tx_desc->nbuf);
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
if (qdf_unlikely(pdev->delay_stats_flag)) if (qdf_unlikely(pdev->delay_stats_flag))
dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id); dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
DP_STATS_INCC(peer, tx.dropped.age_out, 1, DP_STATS_INCC(peer, tx.dropped.age_out, 1,
(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED)); (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
@@ -3868,7 +3868,6 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1, DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
(ts->status == HAL_TX_TQM_RR_FW_REASON3)); (ts->status == HAL_TX_TQM_RR_FW_REASON3));
/* /*
* tx_failed is ideally supposed to be updated from HTT ppdu completion * tx_failed is ideally supposed to be updated from HTT ppdu completion
* stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
@@ -3876,14 +3875,14 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
* data path. Please note that if tx_failed is fixed to be from ppdu, * data path. Please note that if tx_failed is fixed to be from ppdu,
* then this has to be removed * then this has to be removed
*/ */
peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num + txrx_peer->tx_failed =
peer->stats.tx.dropped.fw_rem.num +
peer->stats.tx.dropped.fw_rem_notx + peer->stats.tx.dropped.fw_rem_notx +
peer->stats.tx.dropped.fw_rem_tx + peer->stats.tx.dropped.fw_rem_tx +
peer->stats.tx.dropped.age_out + peer->stats.tx.dropped.age_out +
peer->stats.tx.dropped.fw_reason1 + peer->stats.tx.dropped.fw_reason1 +
peer->stats.tx.dropped.fw_reason2 + peer->stats.tx.dropped.fw_reason2 +
peer->stats.tx.dropped.fw_reason3; peer->stats.tx.dropped.fw_reason3;
if (ts->status < CDP_MAX_TX_TQM_STATUS) { if (ts->status < CDP_MAX_TX_TQM_STATUS) {
tid_stats->tqm_status_cnt[ts->status]++; tid_stats->tqm_status_cnt[ts->status]++;
} }
@@ -3894,6 +3893,7 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
dp_update_no_ack_stats(tx_desc->nbuf, peer); dp_update_no_ack_stats(tx_desc->nbuf, peer);
return; return;
} }
DP_STATS_INCC(peer, tx.retry_count, 1, ts->transmit_cnt > 1); DP_STATS_INCC(peer, tx.retry_count, 1, ts->transmit_cnt > 1);
DP_STATS_INCC(peer, tx.multiple_retry_count, 1, ts->transmit_cnt > 2); DP_STATS_INCC(peer, tx.multiple_retry_count, 1, ts->transmit_cnt > 2);
@@ -3902,14 +3902,12 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu); DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu); DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
/* /*
* Following Rate Statistics are updated from HTT PPDU events from FW. * Following Rate Statistics are updated from HTT PPDU events from FW.
* Return from here if HTT PPDU events are enabled. * Return from here if HTT PPDU events are enabled.
*/ */
if (!(soc->process_tx_status)) if (!(soc->process_tx_status))
return; return;
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1, DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A))); ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1, DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
@@ -4050,7 +4048,7 @@ static inline void dp_tx_notify_completion(struct dp_soc *soc,
*/ */
#ifdef FEATURE_PERPKT_INFO #ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
struct dp_peer *peer, struct dp_txrx_peer *txrx_peer,
uint8_t tid, uint8_t tid,
uint64_t txdesc_ts, uint64_t txdesc_ts,
uint32_t ppdu_id) uint32_t ppdu_id)
@@ -4467,12 +4465,12 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
} }
} }
dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id); dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id); dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts->tid, ring_id);
#ifdef QCA_SUPPORT_RDK_STATS #ifdef QCA_SUPPORT_RDK_STATS
if (soc->rdkstats_enabled) if (soc->rdkstats_enabled)
dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid, dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
tx_desc->timestamp, tx_desc->timestamp,
ts->ppdu_id); ts->ppdu_id);
#endif #endif
@@ -4492,33 +4490,36 @@ out:
* *
* Return: none * Return: none
*/ */
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length, void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint8_t tx_status, bool update) uint32_t length, uint8_t tx_status,
bool update)
{ {
if ((!peer->hw_txrx_stats_en) || update) { if ((!txrx_peer->hw_txrx_stats_en) || update) {
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_INCC(peer, tx.tx_failed, 1, DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED); tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
} }
} }
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length, void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint8_t tx_status, bool update) uint32_t length, uint8_t tx_status,
bool update)
{ {
if (!peer->hw_txrx_stats_en) { if (!peer->hw_txrx_stats_en) {
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_INCC(peer, tx.tx_failed, 1, DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED); tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
} }
} }
#else #else
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length, void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint8_t tx_status, bool update) uint32_t length, uint8_t tx_status,
bool update)
{ {
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length); DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_INCC(peer, tx.tx_failed, 1, DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED); tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
} }
#endif #endif
@@ -4594,8 +4595,8 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) { if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
struct dp_pdev *pdev = desc->pdev; struct dp_pdev *pdev = desc->pdev;
if (qdf_likely(peer)) if (qdf_likely(txrx_peer))
dp_tx_update_peer_basic_stats(peer, dp_tx_update_peer_basic_stats(txrx_peer,
desc->length, desc->length,
desc->tx_status, desc->tx_status,
false); false);

View File

@@ -262,8 +262,9 @@ void dp_tx_inspect_handler(struct dp_soc *soc,
struct dp_vdev *vdev, struct dp_vdev *vdev,
struct dp_tx_desc_s *tx_desc, struct dp_tx_desc_s *tx_desc,
uint8_t *status); uint8_t *status);
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length, void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint8_t tx_status, bool update); uint32_t length, uint8_t tx_status,
bool update);
#ifndef QCA_HOST_MODE_WIFI_DISABLED #ifndef QCA_HOST_MODE_WIFI_DISABLED
/** /**

View File

@@ -766,6 +766,11 @@ struct dp_reo_cmd_info {
TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem; TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
}; };
/**
 * struct dp_peer_delay_stats - per-peer delay statistics kept on the
 * per-packet-path dp_txrx_peer (replaces the old cdp_peer_ext_stats
 * hanging off dp_peer)
 * @delay_tid_stats: Tx/Rx delay histogram stats, indexed first by data
 *                   TID (CDP_MAX_DATA_TIDS) and then by Tx/Rx CPU
 *                   ring/context id (CDP_MAX_TXRX_CTX)
 */
struct dp_peer_delay_stats {
	struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
						  [CDP_MAX_TXRX_CTX];
};
/* Rx TID defrag*/ /* Rx TID defrag*/
struct dp_rx_tid_defrag { struct dp_rx_tid_defrag {
/* TID */ /* TID */
@@ -3367,6 +3372,9 @@ struct dp_txrx_peer {
mld_peer:1; /* MLD peer*/ mld_peer:1; /* MLD peer*/
uint32_t tx_failed; uint32_t tx_failed;
struct cdp_pkt_info comp_pkt;
struct cdp_pkt_info to_stack;
struct dp_peer_delay_stats *delay_stats;
struct { struct {
enum cdp_sec_type sec_type; enum cdp_sec_type sec_type;
@@ -3450,9 +3458,6 @@ struct dp_peer {
/* Peer Stats */ /* Peer Stats */
struct cdp_peer_stats stats; struct cdp_peer_stats stats;
/* Peer extended stats */
struct cdp_peer_ext_stats *pext_stats;
TAILQ_HEAD(, dp_ast_entry) ast_entry_list; TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
/* TBD */ /* TBD */

View File

@@ -838,7 +838,8 @@ done:
nbuf = next; nbuf = next;
continue; continue;
} }
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
txrx_peer);
} }
if (qdf_likely(vdev->rx_decap_type == if (qdf_likely(vdev->rx_decap_type ==
@@ -903,8 +904,8 @@ done:
DP_RX_LIST_APPEND(deliver_list_head, DP_RX_LIST_APPEND(deliver_list_head,
deliver_list_tail, deliver_list_tail,
nbuf); nbuf);
DP_STATS_INC_PKT(peer, rx.to_stack, 1, DP_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf)); QDF_NBUF_CB_RX_PKT_LEN(nbuf));
if (qdf_unlikely(txrx_peer->in_twt)) if (qdf_unlikely(txrx_peer->in_twt))
DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf)); QDF_NBUF_CB_RX_PKT_LEN(nbuf));

View File

@@ -192,11 +192,11 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id, txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
&txrx_ref_handle, &txrx_ref_handle,
DP_MOD_ID_HTT_COMP); DP_MOD_ID_HTT_COMP);
if (qdf_likely(peer)) { if (qdf_likely(txrx_peer)) {
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1,
qdf_nbuf_len(tx_desc->nbuf)); qdf_nbuf_len(tx_desc->nbuf));
DP_STATS_INCC(peer, tx.tx_failed, 1, DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HTT_TX_FW2WBM_TX_STATUS_OK); tx_status != HTT_TX_FW2WBM_TX_STATUS_OK);
} }
dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer, dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,