qcacmn: Move per packet stats params to txrx_peer

Move the stats parameters from the dp_peer which are used
in per packet path to txrx_peer

Change-Id: Ieb68b6950740791b37bbd2bfdc4815c3d7bc4834
CRs-Fixed: 3095637
This commit is contained in:
Pavankumar Nandeshwar
2021-12-13 02:44:55 -08:00
committed by Madan Koyyalamudi
parent 98b25a2ee6
commit b9038e9d4e
16 files changed, 202 insertions(+), 163 deletions(-)

View file

@@ -1062,17 +1062,6 @@ struct cdp_delay_tid_stats {
struct cdp_delay_rx_stats rx_delay;
};
/*
* cdp_peer_ext_stats: Peer extended stats
* @delay_stats: Per TID delay stats
*/
struct cdp_peer_ext_stats {
struct cdp_delay_tid_stats delay_stats[CDP_MAX_DATA_TIDS]
[CDP_MAX_TXRX_CTX];
/*Customer can add MSDU level Tx/Rx stats */
};
/* struct cdp_pkt_info - packet info
* @num: no of packets
* @bytes: total no of bytes

View file

@@ -729,7 +729,7 @@ done:
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
reo_ring_num, tid_stats);
if (qdf_unlikely(vdev->mesh_vdev)) {
@@ -744,7 +744,8 @@ done:
nbuf = next;
continue;
}
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
txrx_peer);
}
if (qdf_likely(vdev->rx_decap_type ==
@@ -774,7 +775,9 @@ done:
DP_RX_LIST_APPEND(deliver_list_head,
deliver_list_tail,
nbuf);
DP_PEER_TO_STACK_INCC_PKT(peer, 1, QDF_NBUF_CB_RX_PKT_LEN(nbuf),
DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf),
enh_flag);
if (qdf_unlikely(txrx_peer->in_twt))
DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,

View file

@@ -280,9 +280,9 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
&txrx_ref_handle,
DP_MOD_ID_HTT_COMP);
if (qdf_likely(peer))
if (qdf_likely(txrx_peer))
dp_tx_update_peer_basic_stats(
peer,
txrx_peer,
qdf_nbuf_len(tx_desc->nbuf),
tx_status,
pdev->enhanced_stats_en);

View file

@@ -743,6 +743,12 @@ void DP_PRINT_STATS(const char *fmt, ...);
_handle->stats._field += _delta; \
}
#define DP_STATS_FLAT_INC(_handle, _field, _delta) \
{ \
if (likely(_handle)) \
_handle->_field += _delta; \
}
#define DP_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
if (_cond && likely(_handle)) \
@@ -755,6 +761,12 @@ void DP_PRINT_STATS(const char *fmt, ...);
_handle->stats._field -= _delta; \
}
#define DP_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \
if (likely(_handle)) \
_handle->_field -= _delta; \
}
#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
if (likely(_handle)) \
@@ -767,6 +779,12 @@ void DP_PRINT_STATS(const char *fmt, ...);
DP_STATS_INC(_handle, _field.bytes, _bytes) \
}
#define DP_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \
DP_STATS_FLAT_INC(_handle, _field.num, _count); \
DP_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
}
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
DP_STATS_INCC(_handle, _field.num, _count, _cond); \
@@ -791,10 +809,13 @@ void DP_PRINT_STATS(const char *fmt, ...);
#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
@@ -805,62 +826,62 @@ void DP_PRINT_STATS(const char *fmt, ...);
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_INC_PKT(_handle, rx.to_stack, _count, _bytes); \
DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_DEC(_handle, rx.to_stack.num, _count); \
DP_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \
DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes); \
}
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \
DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes); \
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_INC_PKT(_handle, rx.to_stack, _count, _bytes); \
DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
}
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_DEC(_handle, rx.to_stack.num, _count); \
DP_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
}
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \
DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes); \
}
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \
if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \
DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes); \
}
#else
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_INC_PKT(_handle, rx.to_stack, _count, _bytes);
DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes);
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
DP_STATS_DEC(_handle, rx.to_stack.num, _count);
DP_STATS_FLAT_DEC(_handle, to_stack.num, _count);
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes);
DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes);
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes);
DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes);
#endif
#ifdef ENABLE_DP_HIST_STATS

View file

@@ -6823,6 +6823,10 @@ static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
peer->txrx_peer = NULL;
dp_peer_defrag_rx_tids_deinit(txrx_peer);
/*
* Deallocate the extended stats contenxt
*/
dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
dp_peer_rx_bufq_resources_deinit(txrx_peer);
qdf_mem_free(txrx_peer);
@@ -6844,8 +6848,20 @@ static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
/* initialize the peer_id */
txrx_peer->vdev = peer->vdev;
dp_wds_ext_peer_init(peer);
dp_wds_ext_peer_init(txrx_peer);
dp_peer_rx_bufq_resources_init(txrx_peer);
dp_peer_hw_txrx_stats_init(soc, txrx_peer);
/*
* Allocate peer extended stats context. Fall through in
* case of failure as its not an implicit requirement to have
* this object for regular statistics updates.
*/
if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
QDF_STATUS_SUCCESS)
dp_warn("peer ext_stats ctx alloc failed");
dp_set_peer_isolation(txrx_peer, false);
dp_peer_defrag_rx_tids_init(txrx_peer);
dp_txrx_peer_attach_add(soc, peer, txrx_peer);
@@ -8818,16 +8834,17 @@ void dp_print_napi_stats(struct dp_soc *soc)
#ifdef QCA_PEER_EXT_STATS
/**
* dp_txrx_host_peer_ext_stats_clr: Reinitialize the txrx peer ext stats
* dp_txrx_host_peer_delay_stats_clr: Reinitialize the txrx peer delay stats
*
*/
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
static inline void dp_txrx_host_peer_delay_stats_clr(struct dp_peer *peer)
{
if (peer->pext_stats)
qdf_mem_zero(peer->pext_stats, sizeof(*peer->pext_stats));
if (peer->txrx_peer->delay_stats)
qdf_mem_zero(peer->txrx_peer->delay_stats,
sizeof(struct dp_peer_delay_stats));
}
#else
static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
static inline void dp_txrx_host_peer_delay_stats_clr(struct dp_peer *peer)
{
}
#endif
@@ -8855,7 +8872,7 @@ dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
DP_STATS_CLR(peer);
dp_txrx_host_peer_ext_stats_clr(peer);
dp_txrx_host_peer_delay_stats_clr(peer);
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
@@ -9465,7 +9482,7 @@ static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
struct dp_peer *peer = NULL;
uint16_t peer_id, ring_id;
uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
struct cdp_peer_ext_stats *pext_stats = NULL;
struct dp_peer_delay_stats *delay_stats = NULL;
peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
if (peer_id > soc->max_peer_id)
@@ -9475,10 +9492,10 @@ static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
if (qdf_unlikely(!peer))
return;
if (qdf_likely(peer->pext_stats)) {
pext_stats = peer->pext_stats;
if (qdf_likely(peer->txrx_peer->delay_stats)) {
delay_stats = peer->txrx_peer->delay_stats;
ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
nbuf);
}
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

View file

@@ -4637,23 +4637,23 @@ dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
#ifdef QCA_PEER_EXT_STATS
/*
* dp_peer_ext_stats_ctx_alloc() - Allocate peer ext
* dp_peer_delay_stats_ctx_alloc() - Allocate peer delay
* stats content
* @soc: DP SoC context
* @peer: DP peer context
* @txrx_peer: DP txrx peer context
*
* Allocate the peer extended stats context
* Allocate the peer delay stats context
*
* Return: QDF_STATUS_SUCCESS if allocation is
* successful
*/
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
struct dp_peer *peer)
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
uint8_t tid, ctx_id;
if (!soc || !peer) {
dp_warn("Null soc%pK or peer%pK", soc, peer);
if (!soc || !txrx_peer) {
dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
return QDF_STATUS_E_INVAL;
}
@@ -4663,8 +4663,9 @@ QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
/*
* Allocate memory for peer extended stats.
*/
peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats));
if (!peer->pext_stats) {
txrx_peer->delay_stats =
qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
if (!txrx_peer->delay_stats) {
dp_err("Peer extended stats obj alloc failed!!");
return QDF_STATUS_E_NOMEM;
}
@@ -4672,9 +4673,9 @@ QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
struct cdp_delay_tx_stats *tx_delay =
&peer->pext_stats->delay_stats[tid][ctx_id].tx_delay;
&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
struct cdp_delay_rx_stats *rx_delay =
&peer->pext_stats->delay_stats[tid][ctx_id].rx_delay;
&txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
dp_hist_init(&tx_delay->tx_swq_delay,
CDP_HIST_TYPE_SW_ENQEUE_DELAY);
@@ -4689,16 +4690,17 @@ QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
}
/*
* dp_peer_ext_stats_ctx_dealloc() - Dealloc the peer context
* @peer: DP peer context
* dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
* @txrx_peer: txrx DP peer context
*
* Free the peer extended stats context
* Free the peer delay stats context
*
* Return: Void
*/
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
if (!peer) {
if (!txrx_peer) {
dp_warn("peer_ext dealloc failed due to NULL peer object");
return;
}
@@ -4706,11 +4708,11 @@ void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
return;
if (!peer->pext_stats)
if (!txrx_peer->delay_stats)
return;
qdf_mem_free(peer->pext_stats);
peer->pext_stats = NULL;
qdf_mem_free(txrx_peer->delay_stats);
txrx_peer->delay_stats = NULL;
}
#endif

View file

@@ -886,19 +886,21 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc,
union hal_reo_status *reo_status);
#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_peer *peer);
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
struct dp_peer *peer)
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
return QDF_STATUS_SUCCESS;
}
static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_peer *peer)
static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
}
#endif

View file

@@ -774,7 +774,7 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
DP_STATS_INC_PKT(txrx_peer, rx.raw, 1, qdf_nbuf_len(nbuf));
/*
* reset the chfrag_start and chfrag_end bits in nbuf cb
* as this is a non-amsdu pkt and RAW mode simulation expects
@@ -1077,7 +1077,7 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer)
uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{
}
@@ -1917,7 +1917,7 @@ dp_rx_validate_rx_callbacks(struct dp_soc *soc,
} else {
num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
nbuf_head);
DP_PEER_TO_STACK_DECC(peer, num_nbuf,
DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
vdev->pdev->enhanced_stats_en);
}
return QDF_STATUS_E_FAILURE;
@@ -1994,7 +1994,7 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
* @soc: core txrx main context
* @nbuf: pointer to the first msdu of an amsdu.
* @rx_tlv_hdr: pointer to the start of RX TLV headers.
* @peer: pointer to the peer object.
* @txrx_peer: pointer to the txrx peer object.
* @ring_id: reo dest ring number on which pkt is reaped.
* @tid_stats: per tid rx stats.
*
@@ -2002,26 +2002,25 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
* Return: void
*/
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer,
uint8_t ring_id,
struct cdp_tid_rx_stats *tid_stats)
{
bool is_ampdu, is_not_amsdu;
uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
struct dp_vdev *vdev = peer->vdev;
struct dp_vdev *vdev = txrx_peer->vdev;
bool enh_flag;
qdf_ether_header_t *eh;
uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
qdf_nbuf_is_rx_chfrag_end(nbuf);
DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
DP_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, msdu_len);
DP_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
DP_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
tid_stats->msdu_cnt++;
if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
@@ -2051,7 +2050,6 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
tid = qdf_nbuf_get_tid_val(nbuf);
@@ -2060,7 +2058,6 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
rx_tlv_hdr);
nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
@@ -2074,7 +2071,6 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
pkt_type == DOT11_AC ||
pkt_type == DOT11_AX))
DP_STATS_INC(peer, rx.nss[nss - 1], 1);
DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
DP_STATS_INCC(peer, rx.err.mic_err, 1,
hal_rx_tlv_mic_err_get(soc->hal_soc, rx_tlv_hdr));

View file

@@ -1491,7 +1491,7 @@ dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
uint32_t mac_id, uint32_t quota);
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer);
uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr);
@@ -1899,7 +1899,7 @@ void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
#endif
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_peer *peer,
uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer,
uint8_t ring_id,
struct cdp_tid_rx_stats *tid_stats);

View file

@@ -1065,7 +1065,8 @@ dp_2k_jump_handle(struct dp_soc *soc,
IEEE80211_REASON_QOS_SETUP_REQUIRED;
qdf_spin_unlock_bh(&rx_tid->tid_lock);
if (soc->cdp_soc.ol_ops->send_delba) {
DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1);
DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
1);
soc->cdp_soc.ol_ops->send_delba(
peer->vdev->pdev->soc->ctrl_psoc,
peer->vdev->vdev_id,
@@ -1403,8 +1404,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
if ((!soc->mec_fw_offload) &&
dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
/* this is a looped back MCBC pkt, drop it */
DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
goto drop_nbuf;
}
@@ -1488,7 +1488,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
} else {
enh_flag = vdev->pdev->enhanced_stats_en;
qdf_nbuf_set_next(nbuf, NULL);
DP_PEER_TO_STACK_INCC_PKT(peer, 1, qdf_nbuf_len(nbuf),
DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
/*
* Update the protocol tag in SKB based on
@@ -1506,7 +1506,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
soc->hal_soc, rx_tlv_hdr) &&
(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet))) {
DP_PEER_MC_INCC_PKT(peer, 1, qdf_nbuf_len(nbuf),
DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
@@ -1659,7 +1659,7 @@ process_mesh:
dp_rx_nbuf_free(nbuf);
return;
}
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
}
process_rx:
if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
@@ -1684,7 +1684,7 @@ process_rx:
EXCEPTION_DEST_RING_ID, true, true);
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
DP_STATS_INC(peer, rx.to_stack.num, 1);
DP_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
qdf_nbuf_set_exc_frame(nbuf, 1);
dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
@@ -1906,7 +1906,8 @@ dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
true);
DP_PEER_TO_STACK_INCC_PKT(peer, 1, qdf_nbuf_len(nbuf),
DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
qdf_nbuf_len(nbuf),
vdev->pdev->enhanced_stats_en);
qdf_nbuf_set_exc_frame(nbuf, 1);
qdf_nbuf_set_next(nbuf, NULL);
@@ -2809,8 +2810,8 @@ done:
case HAL_REO_ERR_PN_CHECK_FAILED:
case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
if (peer)
DP_STATS_INC(peer,
if (txrx_peer)
DP_STATS_INC(txrx_peer,
rx.err.pn_err, 1);
dp_rx_nbuf_free(nbuf);
break;

View file

@@ -5769,7 +5769,7 @@ static void dp_accumulate_delay_tid_stats(struct dp_soc *soc,
void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev,
struct dp_peer *peer)
{
struct cdp_peer_ext_stats *pext_stats;
struct dp_peer_delay_stats *delay_stats;
struct dp_soc *soc = NULL;
struct cdp_hist_stats hist_stats;
uint8_t tid;
@@ -5781,22 +5781,22 @@ void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev,
if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
return;
pext_stats = peer->pext_stats;
if (!pext_stats)
delay_stats = peer->txrx_peer->delay_stats;
if (!delay_stats)
return;
for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
DP_PRINT_STATS("----TID: %d----", tid);
DP_PRINT_STATS("Software Enqueue Delay:");
qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats)));
dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats,
dp_accumulate_delay_tid_stats(soc, delay_stats->delay_tid_stats,
&hist_stats, tid,
CDP_HIST_TYPE_SW_ENQEUE_DELAY);
dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_SW_ENQEUE_DELAY);
qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats)));
DP_PRINT_STATS("Hardware Transmission Delay:");
dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats,
dp_accumulate_delay_tid_stats(soc, delay_stats->delay_tid_stats,
&hist_stats, tid,
CDP_HIST_TYPE_HW_COMP_DELAY);
dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_HW_COMP_DELAY);
@@ -5813,7 +5813,7 @@ void dp_peer_print_tx_delay_stats(struct dp_pdev *pdev,
void dp_peer_print_rx_delay_stats(struct dp_pdev *pdev,
struct dp_peer *peer)
{
struct cdp_peer_ext_stats *pext_stats;
struct dp_peer_delay_stats *delay_stats;
struct dp_soc *soc = NULL;
struct cdp_hist_stats hist_stats;
uint8_t tid;
@@ -5825,15 +5825,15 @@ void dp_peer_print_rx_delay_stats(struct dp_pdev *pdev,
if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
return;
pext_stats = peer->pext_stats;
if (!pext_stats)
delay_stats = peer->txrx_peer->delay_stats;
if (!delay_stats)
return;
for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
DP_PRINT_STATS("----TID: %d----", tid);
DP_PRINT_STATS("Rx Reap2stack Deliver Delay:");
qdf_mem_zero(&hist_stats, sizeof(*(&hist_stats)));
dp_accumulate_delay_tid_stats(soc, pext_stats->delay_stats,
dp_accumulate_delay_tid_stats(soc, delay_stats->delay_tid_stats,
&hist_stats, tid,
CDP_HIST_TYPE_REAP_STACK);
dp_print_hist_stats(&hist_stats, CDP_HIST_TYPE_REAP_STACK);
@@ -5963,9 +5963,9 @@ void dp_print_peer_stats(struct dp_peer *peer)
DP_PRINT_STATS("Node Tx Stats:\n");
DP_PRINT_STATS("Total Packet Completions = %d",
peer->stats.tx.comp_pkt.num);
peer->txrx_peer->comp_pkt.num);
DP_PRINT_STATS("Total Bytes Completions = %llu",
peer->stats.tx.comp_pkt.bytes);
peer->txrx_peer->comp_pkt.bytes);
DP_PRINT_STATS("Success Packets = %d",
peer->stats.tx.tx_success.num);
DP_PRINT_STATS("Success Bytes = %llu",
@@ -6088,9 +6088,9 @@ void dp_print_peer_stats(struct dp_peer *peer)
DP_PRINT_STATS("Node Rx Stats:");
DP_PRINT_STATS("Packets Sent To Stack = %d",
peer->stats.rx.to_stack.num);
peer->txrx_peer->to_stack.num);
DP_PRINT_STATS("Bytes Sent To Stack = %llu",
peer->stats.rx.to_stack.bytes);
peer->txrx_peer->to_stack.bytes);
for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
DP_PRINT_STATS("Ring Id = %d", i);
DP_PRINT_STATS(" Packets Received = %d",

View file

@@ -3690,8 +3690,8 @@ static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
}
/*
* dp_tx_update_peer_ext_stats() - Update the peer extended stats
* @peer: DP peer context
* dp_tx_update_peer_delay_stats() - Update the peer delay stats
* @txrx_peer: DP peer context
* @tx_desc: Tx software descriptor
* @tid: Transmission ID
* @ring_id: Rx CPU context ID/CPU_ID
@@ -3701,21 +3701,21 @@ static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
*
* Return: void
*/
static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
struct dp_tx_desc_s *tx_desc,
uint8_t tid, uint8_t ring_id)
static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
struct dp_tx_desc_s *tx_desc,
uint8_t tid, uint8_t ring_id)
{
struct dp_pdev *pdev = peer->vdev->pdev;
struct dp_pdev *pdev = txrx_peer->vdev->pdev;
struct dp_soc *soc = NULL;
struct cdp_peer_ext_stats *pext_stats = NULL;
struct dp_peer_delay_stats *delay_stats = NULL;
soc = pdev->soc;
if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
return;
pext_stats = peer->pext_stats;
delay_stats = txrx_peer->delay_stats;
qdf_assert(pext_stats);
qdf_assert(delay_stats);
qdf_assert(ring < CDP_MAX_TXRX_CTX);
/*
@@ -3724,13 +3724,13 @@ static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
tid = CDP_MAX_DATA_TIDS - 1;
dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
tx_desc);
}
#else
static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
struct dp_tx_desc_s *tx_desc,
uint8_t tid, uint8_t ring_id)
static inline void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
struct dp_tx_desc_s *tx_desc,
uint8_t tid, uint8_t ring_id)
{
}
#endif
@@ -3817,9 +3817,9 @@ dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
struct hal_tx_completion_status *ts,
struct dp_peer *peer, uint8_t ring_id)
struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
{
struct dp_pdev *pdev = peer->vdev->pdev;
struct dp_pdev *pdev = txrx_peer->vdev->pdev;
struct dp_soc *soc = NULL;
uint8_t mcs, pkt_type;
uint8_t tid = ts->tid;
@@ -3844,10 +3844,10 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
}
length = qdf_nbuf_len(tx_desc->nbuf);
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
if (qdf_unlikely(pdev->delay_stats_flag))
dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
DP_STATS_INCC(peer, tx.dropped.age_out, 1,
(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
@@ -3868,7 +3868,6 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
(ts->status == HAL_TX_TQM_RR_FW_REASON3));
/*
* tx_failed is ideally supposed to be updated from HTT ppdu completion
* stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
@@ -3876,14 +3875,14 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
* data path. Please note that if tx_failed is fixed to be from ppdu,
* then this has to be removed
*/
peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
txrx_peer->tx_failed =
peer->stats.tx.dropped.fw_rem.num +
peer->stats.tx.dropped.fw_rem_notx +
peer->stats.tx.dropped.fw_rem_tx +
peer->stats.tx.dropped.age_out +
peer->stats.tx.dropped.fw_reason1 +
peer->stats.tx.dropped.fw_reason2 +
peer->stats.tx.dropped.fw_reason3;
if (ts->status < CDP_MAX_TX_TQM_STATUS) {
tid_stats->tqm_status_cnt[ts->status]++;
}
@@ -3894,6 +3893,7 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
dp_update_no_ack_stats(tx_desc->nbuf, peer);
return;
}
DP_STATS_INCC(peer, tx.retry_count, 1, ts->transmit_cnt > 1);
DP_STATS_INCC(peer, tx.multiple_retry_count, 1, ts->transmit_cnt > 2);
@@ -3902,14 +3902,12 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
/*
* Following Rate Statistics are updated from HTT PPDU events from FW.
* Return from here if HTT PPDU events are enabled.
*/
if (!(soc->process_tx_status))
return;
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
@@ -4050,7 +4048,7 @@ static inline void dp_tx_notify_completion(struct dp_soc *soc,
*/
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
struct dp_peer *peer,
struct dp_txrx_peer *txrx_peer,
uint8_t tid,
uint64_t txdesc_ts,
uint32_t ppdu_id)
@@ -4467,12 +4465,12 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
}
}
dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts->tid, ring_id);
#ifdef QCA_SUPPORT_RDK_STATS
if (soc->rdkstats_enabled)
dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
tx_desc->timestamp,
ts->ppdu_id);
#endif
@@ -4492,33 +4490,36 @@ out:
*
* Return: none
*/
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
uint8_t tx_status, bool update)
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint32_t length, uint8_t tx_status,
bool update)
{
if ((!peer->hw_txrx_stats_en) || update) {
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
DP_STATS_INCC(peer, tx.tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
if ((!txrx_peer->hw_txrx_stats_en) || update) {
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
}
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
uint8_t tx_status, bool update)
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint32_t length, uint8_t tx_status,
bool update)
{
if (!peer->hw_txrx_stats_en) {
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
DP_STATS_INCC(peer, tx.tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
}
}
#else
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
uint8_t tx_status, bool update)
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint32_t length, uint8_t tx_status,
bool update)
{
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
DP_STATS_INCC(peer, tx.tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
}
#endif
@@ -4594,8 +4595,8 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
struct dp_pdev *pdev = desc->pdev;
if (qdf_likely(peer))
dp_tx_update_peer_basic_stats(peer,
if (qdf_likely(txrx_peer))
dp_tx_update_peer_basic_stats(txrx_peer,
desc->length,
desc->tx_status,
false);

Fájl megtekintése

@@ -262,8 +262,9 @@ void dp_tx_inspect_handler(struct dp_soc *soc,
struct dp_vdev *vdev,
struct dp_tx_desc_s *tx_desc,
uint8_t *status);
void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
uint8_t tx_status, bool update);
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint32_t length, uint8_t tx_status,
bool update);
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**

View file

@@ -766,6 +766,11 @@ struct dp_reo_cmd_info {
TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
};
struct dp_peer_delay_stats {
struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
[CDP_MAX_TXRX_CTX];
};
/* Rx TID defrag*/
struct dp_rx_tid_defrag {
/* TID */
@@ -3367,6 +3372,9 @@ struct dp_txrx_peer {
mld_peer:1; /* MLD peer*/
uint32_t tx_failed;
struct cdp_pkt_info comp_pkt;
struct cdp_pkt_info to_stack;
struct dp_peer_delay_stats *delay_stats;
struct {
enum cdp_sec_type sec_type;
@@ -3450,9 +3458,6 @@ struct dp_peer {
/* Peer Stats */
struct cdp_peer_stats stats;
/* Peer extended stats */
struct cdp_peer_ext_stats *pext_stats;
TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
/* TBD */

View file

@@ -838,7 +838,8 @@ done:
nbuf = next;
continue;
}
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
txrx_peer);
}
if (qdf_likely(vdev->rx_decap_type ==
@@ -903,8 +904,8 @@ done:
DP_RX_LIST_APPEND(deliver_list_head,
deliver_list_tail,
nbuf);
DP_STATS_INC_PKT(peer, rx.to_stack, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
DP_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
if (qdf_unlikely(txrx_peer->in_twt))
DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));

View file

@@ -192,11 +192,11 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
txrx_peer = dp_txrx_peer_get_ref_by_id(soc, ts.peer_id,
&txrx_ref_handle,
DP_MOD_ID_HTT_COMP);
if (qdf_likely(peer)) {
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
qdf_nbuf_len(tx_desc->nbuf));
DP_STATS_INCC(peer, tx.tx_failed, 1,
tx_status != HTT_TX_FW2WBM_TX_STATUS_OK);
if (qdf_likely(txrx_peer)) {
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1,
qdf_nbuf_len(tx_desc->nbuf));
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HTT_TX_FW2WBM_TX_STATUS_OK);
}
dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,