qcacmn: Update stats in dp_txrx_peer in per packet path

Update stats in dp_txrx_peer in per packet Tx completion and
Rx path.

Change-Id: I807cb5ca9fe2aeeabdd4cb95d6e30cb9781560f4
CRs-Fixed: 3092123
This commit is contained in:
Harsh Kumar Bijlani
2021-12-19 21:59:25 +05:30
committed by Madan Koyyalamudi
parent 27e9e64532
commit 93cd9e0af8
10 changed files with 476 additions and 272 deletions

View File

@@ -646,7 +646,9 @@ done:
if (qdf_nbuf_is_raw_frame(nbuf)) { if (qdf_nbuf_is_raw_frame(nbuf)) {
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.raw, 1,
msdu_len);
} else { } else {
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
DP_STATS_INC(soc, rx.err.scatter_msdu, 1); DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
@@ -671,7 +673,9 @@ done:
if (qdf_unlikely(vdev->multipass_en)) { if (qdf_unlikely(vdev->multipass_en)) {
if (dp_rx_multipass_process(txrx_peer, nbuf, if (dp_rx_multipass_process(txrx_peer, nbuf,
tid) == false) { tid) == false) {
DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.multipass_rx_pkt_drop,
1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
nbuf = next; nbuf = next;
continue; continue;
@@ -680,7 +684,8 @@ done:
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) { if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_rx_err("%pK: Policy Check Drop pkt", soc); dp_rx_err("%pK: Policy Check Drop pkt", soc);
DP_STATS_INC(peer, rx.policy_check_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.policy_check_drop, 1);
tid_stats->fail_cnt[POLICY_CHECK_DROP]++; tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
/* Drop & free packet */ /* Drop & free packet */
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
@@ -695,7 +700,8 @@ done:
rx_tlv_hdr) == rx_tlv_hdr) ==
false))) { false))) {
tid_stats->fail_cnt[NAWDS_MCAST_DROP]++; tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.nawds_mcast_drop, 1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
nbuf = next; nbuf = next;
continue; continue;
@@ -711,8 +717,9 @@ done:
qdf_nbuf_is_ipv4_wapi_pkt(nbuf); qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
if (!is_eapol) { if (!is_eapol) {
DP_STATS_INC(peer, DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.peer_unauth_rx_pkt_drop, 1); rx.peer_unauth_rx_pkt_drop,
1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
nbuf = next; nbuf = next;
continue; continue;
@@ -780,8 +787,9 @@ done:
QDF_NBUF_CB_RX_PKT_LEN(nbuf), QDF_NBUF_CB_RX_PKT_LEN(nbuf),
enh_flag); enh_flag);
if (qdf_unlikely(txrx_peer->in_twt)) if (qdf_unlikely(txrx_peer->in_twt))
DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
QDF_NBUF_CB_RX_PKT_LEN(nbuf)); rx.to_stack_twt, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
tid_stats->delivered_to_stack++; tid_stats->delivered_to_stack++;
nbuf = next; nbuf = next;

View File

@@ -754,7 +754,7 @@ void DP_PRINT_STATS(const char *fmt, ...);
_handle->stats._field += _delta; \ _handle->stats._field += _delta; \
} }
#define DP_STATS_FLAT_INC(_handle, _field, _delta) \ #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta) \
{ \ { \
if (likely(_handle)) \ if (likely(_handle)) \
_handle->_field += _delta; \ _handle->_field += _delta; \
@@ -772,7 +772,7 @@ void DP_PRINT_STATS(const char *fmt, ...);
_handle->stats._field -= _delta; \ _handle->stats._field -= _delta; \
} }
#define DP_STATS_FLAT_DEC(_handle, _field, _delta) \ #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta) \
{ \ { \
if (likely(_handle)) \ if (likely(_handle)) \
_handle->_field -= _delta; \ _handle->_field -= _delta; \
@@ -790,10 +790,10 @@ void DP_PRINT_STATS(const char *fmt, ...);
DP_STATS_INC(_handle, _field.bytes, _bytes) \ DP_STATS_INC(_handle, _field.bytes, _bytes) \
} }
#define DP_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \ #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) \
{ \ { \
DP_STATS_FLAT_INC(_handle, _field.num, _count); \ DP_PEER_STATS_FLAT_INC(_handle, _field.num, _count); \
DP_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \ DP_PEER_STATS_FLAT_INC(_handle, _field.bytes, _bytes) \
} }
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \ #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
@@ -820,79 +820,118 @@ void DP_PRINT_STATS(const char *fmt, ...);
#else #else
#define DP_STATS_INC(_handle, _field, _delta) #define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_FLAT_INC(_handle, _field, _delta) #define DP_PEER_STATS_FLAT_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond) #define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta) #define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_FLAT_DEC(_handle, _field, _delta) #define DP_PEER_STATS_FLAT_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta) #define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) #define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes) #define DP_PEER_STATS_FLAT_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) #define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field) #define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) #define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#endif #endif
/*
 * Per-packet peer stats helpers.
 *
 * These wrap the base DP_STATS_* macros and prefix the field with
 * "per_pkt_stats.", so counters land in the per-packet stats bucket of
 * the txrx_peer stats structure (the base macros resolve through
 * _handle->stats.<field> per the DP_STATS_* definitions above).  When
 * the base DP_STATS_* macros are compiled out (stats-disabled build),
 * these expand to nothing as well.
 *
 * NOTE(review): like the base macros, these use a bare { } block rather
 * than do { } while (0) — matches file convention, but callers must not
 * use them as the sole statement of an un-braced if/else.
 */
#define DP_PEER_PER_PKT_STATS_INC(_handle, _field, _delta) \
{ \
	DP_STATS_INC(_handle, per_pkt_stats._field, _delta); \
}

/* Conditional increment: bumps per_pkt_stats._field only when _cond holds. */
#define DP_PEER_PER_PKT_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	DP_STATS_INCC(_handle, per_pkt_stats._field, _delta, _cond); \
}

/* Packet counter update: increments both _field.num and _field.bytes. */
#define DP_PEER_PER_PKT_STATS_INC_PKT(_handle, _field, _count, _bytes) \
{ \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.num, _count); \
	DP_PEER_PER_PKT_STATS_INC(_handle, _field.bytes, _bytes) \
}

/* Conditional packet counter update: num/bytes bumped only when _cond holds. */
#define DP_PEER_PER_PKT_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.num, _count, _cond); \
	DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}
#ifndef QCA_ENHANCED_STATS_SUPPORT
/*
 * Extended peer stats helpers.
 *
 * Route counters into the "extd_stats." bucket of the txrx_peer stats
 * structure via the base DP_STATS_* macros.  Only defined here when
 * QCA_ENHANCED_STATS_SUPPORT is absent; with enhanced stats enabled
 * these counters are presumably maintained by the enhanced (per-PPDU)
 * stats path instead — TODO confirm against the enhanced-stats
 * implementation, which provides its own definitions.
 */
#define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta) \
{ \
	DP_STATS_INC(_handle, extd_stats._field, _delta); \
}

/* Conditional increment: bumps extd_stats._field only when _cond holds. */
#define DP_PEER_EXTD_STATS_INCC(_handle, _field, _delta, _cond) \
{ \
	DP_STATS_INCC(_handle, extd_stats._field, _delta, _cond); \
}

/* Overwrite update: sets extd_stats._field to _delta (not additive). */
#define DP_PEER_EXTD_STATS_UPD(_handle, _field, _delta) \
{ \
	DP_STATS_UPD(_handle, extd_stats._field, _delta); \
}
#endif
#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \ #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
defined(QCA_ENHANCED_STATS_SUPPORT) defined(QCA_ENHANCED_STATS_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
} }
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
} }
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes); \ DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \
} }
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en) || _cond) \ if (!(_handle->hw_txrx_stats_en) || _cond) \
DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes); \ DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \
} }
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \ DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); \
} }
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_FLAT_DEC(_handle, to_stack.num, _count); \ DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count); \
} }
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes); \ DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes); \
} }
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
{ \ { \
if (!(_handle->hw_txrx_stats_en)) \ if (!(_handle->hw_txrx_stats_en)) \
DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes); \ DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes); \
} }
#else #else
#define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_TO_STACK_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes); DP_PEER_STATS_FLAT_INC_PKT(_handle, to_stack, _count, _bytes);
#define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \ #define DP_PEER_TO_STACK_DECC(_handle, _count, _cond) \
DP_STATS_FLAT_DEC(_handle, to_stack.num, _count); DP_PEER_STATS_FLAT_DEC(_handle, to_stack.num, _count);
#define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_MC_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_FLAT_INC_PKT(_handle, multicast, _count, _bytes); DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.multicast, _count, _bytes);
#define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \ #define DP_PEER_BC_INCC_PKT(_handle, _count, _bytes, _cond) \
DP_STATS_FLAT_INC_PKT(_handle, bcast, _count, _bytes); DP_PEER_PER_PKT_STATS_INC_PKT(_handle, rx.bcast, _count, _bytes);
#endif #endif
#ifdef ENABLE_DP_HIST_STATS #ifdef ENABLE_DP_HIST_STATS
@@ -2072,7 +2111,7 @@ dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
* dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters * dp_vdev_peer_stats_update_protocol_cnt() - update per-peer protocol counters
* @vdev: VDEV DP object * @vdev: VDEV DP object
* @nbuf: data packet * @nbuf: data packet
* @peer: Peer DP object * @peer: DP TXRX Peer object
* @is_egress: whether egress or ingress * @is_egress: whether egress or ingress
* @is_rx: whether rx or tx * @is_rx: whether rx or tx
* *
@@ -2081,7 +2120,7 @@ dp_hif_update_pipe_callback(struct dp_soc *dp_soc, void *cb_context,
*/ */
void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
qdf_nbuf_t nbuf, qdf_nbuf_t nbuf,
struct dp_peer *peer, struct dp_txrx_peer *txrx_peer,
bool is_egress, bool is_egress,
bool is_rx); bool is_rx);
@@ -2107,7 +2146,7 @@ void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
qdf_nbuf_t nbuf); qdf_nbuf_t nbuf);
#else #else
#define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, peer, \ #define dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, txrx_peer, \
is_egress, is_rx) is_egress, is_rx)
static inline static inline

View File

@@ -2748,16 +2748,23 @@ static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
if (qdf_unlikely(!vdev_peer)) if (qdf_unlikely(!vdev_peer))
return nbuf; return nbuf;
qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb)); if (qdf_unlikely(!vdev_peer->txrx_peer)) {
len = qdf_nbuf_len(nbuf);
if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.fail, 1, len);
dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
return nbuf; return nbuf;
} }
DP_STATS_INC_PKT(vdev_peer, rx.intra_bss.pkts, 1, len); qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
len = qdf_nbuf_len(nbuf);
if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
rx.intra_bss.fail, 1, len);
dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
return nbuf;
}
DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
rx.intra_bss.pkts, 1, len);
dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA); dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
return NULL; return NULL;
} }

View File

@@ -774,7 +774,8 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf); DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
DP_STATS_INC_PKT(txrx_peer, rx.raw, 1, qdf_nbuf_len(nbuf)); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
qdf_nbuf_len(nbuf));
/* /*
* reset the chfrag_start and chfrag_end bits in nbuf cb * reset the chfrag_start and chfrag_end bits in nbuf cb
* as this is a non-amsdu pkt and RAW mode simulation expects * as this is a non-amsdu pkt and RAW mode simulation expects
@@ -843,11 +844,13 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
CB_FTYPE_INTRABSS_FWD); CB_FTYPE_INTRABSS_FWD);
if (dp_tx_send((struct cdp_soc_t *)soc, if (dp_tx_send((struct cdp_soc_t *)soc,
ta_peer->vdev->vdev_id, nbuf_copy)) { ta_peer->vdev->vdev_id, nbuf_copy)) {
DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len); DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
len);
tid_stats->fail_cnt[INTRABSS_DROP]++; tid_stats->fail_cnt[INTRABSS_DROP]++;
dp_rx_nbuf_free(nbuf_copy); dp_rx_nbuf_free(nbuf_copy);
} else { } else {
DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len); DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
len);
tid_stats->intrabss_cnt++; tid_stats->intrabss_cnt++;
} }
return false; return false;
@@ -883,8 +886,9 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
nbuf = qdf_nbuf_unshare(nbuf); nbuf = qdf_nbuf_unshare(nbuf);
if (!nbuf) { if (!nbuf) {
DP_STATS_INC_PKT(ta_peer, DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
rx.intra_bss.fail, 1, len); rx.intra_bss.fail,
1, len);
/* return true even though the pkt is /* return true even though the pkt is
* not forwarded. Basically skb_unshare * not forwarded. Basically skb_unshare
* failed and we want to continue with * failed and we want to continue with
@@ -897,11 +901,11 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
if (!dp_tx_send((struct cdp_soc_t *)soc, if (!dp_tx_send((struct cdp_soc_t *)soc,
tx_vdev_id, nbuf)) { tx_vdev_id, nbuf)) {
DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
len); len);
} else { } else {
DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
len); len);
tid_stats->fail_cnt[INTRABSS_DROP]++; tid_stats->fail_cnt[INTRABSS_DROP]++;
return false; return false;
} }
@@ -940,6 +944,9 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint32_t center_chan_freq; uint32_t center_chan_freq;
struct dp_soc *soc = vdev->pdev->soc; struct dp_soc *soc = vdev->pdev->soc;
struct dp_peer *peer; struct dp_peer *peer;
struct dp_peer *primary_link_peer;
struct dp_soc *link_peer_soc;
cdp_peer_stats_param_t buf = {0};
/* fill recv mesh stats */ /* fill recv mesh stats */
rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s)); rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
@@ -973,10 +980,22 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
rx_info->rs_keyix); rx_info->rs_keyix);
} }
rx_info->rs_snr = peer->stats.rx.snr;
dp_peer_unref_delete(peer, DP_MOD_ID_MESH); dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
} }
primary_link_peer = dp_get_primary_link_peer_by_id(soc,
txrx_peer->peer_id,
DP_MOD_ID_MESH);
if (qdf_likely(primary_link_peer)) {
link_peer_soc = primary_link_peer->vdev->pdev->soc;
dp_monitor_peer_get_stats_param(link_peer_soc,
primary_link_peer,
cdp_peer_rx_snr, &buf);
rx_info->rs_snr = buf.rx_snr;
dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
}
rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR; rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
soc = vdev->pdev->soc; soc = vdev->pdev->soc;
@@ -1738,6 +1757,7 @@ dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info; struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
int num_buff_elem; int num_buff_elem;
QDF_STATUS ret = QDF_STATUS_SUCCESS; QDF_STATUS ret = QDF_STATUS_SUCCESS;
struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
DP_MOD_ID_RX); DP_MOD_ID_RX);
@@ -1873,7 +1893,8 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head); num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
DP_STATS_INC(soc, rx.err.rejected, num_nbuf); DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
if (txrx_peer) if (txrx_peer)
DP_STATS_DEC(txrx_peer, to_stack.num, num_nbuf); DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
num_nbuf);
} }
} }
#endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
@@ -1966,27 +1987,128 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
#ifndef QCA_HOST_MODE_WIFI_DISABLED #ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef VDEV_PEER_PROTOCOL_COUNT #ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \ #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
{ \ { \
qdf_nbuf_t nbuf_local; \ qdf_nbuf_t nbuf_local; \
struct dp_peer *peer_local; \ struct dp_txrx_peer *txrx_peer_local; \
struct dp_vdev *vdev_local = vdev_hdl; \ struct dp_vdev *vdev_local = vdev_hdl; \
do { \ do { \
if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \ if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
break; \ break; \
nbuf_local = nbuf; \ nbuf_local = nbuf; \
peer_local = peer; \ txrx_peer_local = txrx_peer; \
if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \ if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
break; \ break; \
else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \ else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
break; \ break; \
dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \ dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
(nbuf_local), \ (nbuf_local), \
(peer_local), 0, 1); \ (txrx_peer_local), 0, 1); \
} while (0); \ } while (0); \
} }
#else #else
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
#endif
#ifndef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
 *
 * Parses the Rx TLV header of a received MSDU and folds the PHY-level
 * attributes (AMPDU flag, SGI, MCS, bandwidth, NSS, reception type,
 * packet type) into the peer's extended stats, plus MIC/decrypt error
 * flags into the per-packet stats.  Compiled only when
 * QCA_ENHANCED_STATS_SUPPORT is absent; see the no-op stub in the
 * #else branch otherwise.
 *
 * @soc: datapath soc handle (provides hal_soc for TLV accessors)
 * @nbuf: received msdu buffer
 * @rx_tlv_hdr: rx tlv header
 * @txrx_peer: datapath txrx_peer handle
 *
 * Return: void
 */
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
	bool is_ampdu;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;

	/*
	 * TODO - For KIWI this field is present in ring_desc
	 * Try to use ring desc instead of tlv.
	 */
	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
	/* Exactly one of ampdu_cnt / non_ampdu_cnt is bumped per MSDU. */
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	/* Pull PHY attributes for this MSDU out of the Rx TLV header. */
	sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
	mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	tid = qdf_nbuf_get_tid_val(nbuf);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);

	/*
	 * Count MPDUs (not MSDUs): only the first MSDU of a chain
	 * (CHFRAG_START) bumps the per-MCS MPDU histogram; out-of-range
	 * MCS values are folded into the last bucket.
	 */
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
				((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
				((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
	/*
	 * only if nss > 0 and pkt_type is 11N/AC/AX,
	 * then increase index [nss - 1] in array counter.
	 */
	if (nss > 0 && (pkt_type == DOT11_N ||
			pkt_type == DOT11_AC ||
			pkt_type == DOT11_AX))
		DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);

	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
	/* MIC/decrypt errors live in the per-packet stats bucket, not extd. */
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
				   hal_rx_tlv_mic_err_get(soc->hal_soc,
							  rx_tlv_hdr));
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
				   hal_rx_tlv_decrypt_err_get(soc->hal_soc,
							      rx_tlv_hdr));
	/* tid is consumed only for the TID -> WME access-category mapping. */
	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1);

	/*
	 * Per-packet-type MCS histogram: in-range MCS bumps its own
	 * bucket, out-of-range is folded into [MAX_MCS - 1].
	 *
	 * NOTE(review): for 11A/11B/11N/11AC the overflow condition uses
	 * ">= MAX_MCS_11x" while the in-range condition uses
	 * "<= MAX_MCS_11x", so at mcs == MAX_MCS_11x BOTH counters are
	 * incremented (the 11AX pair below uses a strict "<" and does not
	 * overlap) — TODO confirm whether the boundary double-count is
	 * intended.
	 */
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
				((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[mcs], 1,
				((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
				((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[mcs], 1,
				((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
				((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[mcs], 1,
				((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
				((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[mcs], 1,
				((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
				((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer,
				rx.pkt_type[pkt_type].mcs_count[mcs], 1,
				((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
}
#else
/*
 * QCA_ENHANCED_STATS_SUPPORT builds: per-MSDU extended stats update is a
 * no-op here — presumably these counters are maintained at PPDU level by
 * the enhanced-stats path instead (TODO confirm), matching the removed
 * per-MSDU comment "similar stats updated at per ppdu level".
 */
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
}
#endif #endif
/** /**
@@ -2007,8 +2129,7 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t ring_id, uint8_t ring_id,
struct cdp_tid_rx_stats *tid_stats) struct cdp_tid_rx_stats *tid_stats)
{ {
bool is_ampdu, is_not_amsdu; bool is_not_amsdu;
uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
struct dp_vdev *vdev = txrx_peer->vdev; struct dp_vdev *vdev = txrx_peer->vdev;
bool enh_flag; bool enh_flag;
qdf_ether_header_t *eh; qdf_ether_header_t *eh;
@@ -2017,89 +2138,29 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer); dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) & is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
qdf_nbuf_is_rx_chfrag_end(nbuf); qdf_nbuf_is_rx_chfrag_end(nbuf);
DP_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1, msdu_len); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
DP_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1, is_not_amsdu); msdu_len);
DP_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu); DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf)); is_not_amsdu);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
qdf_nbuf_is_rx_retry_flag(nbuf));
tid_stats->msdu_cnt++; tid_stats->msdu_cnt++;
if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) && if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) { (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
enh_flag = vdev->pdev->enhanced_stats_en; enh_flag = vdev->pdev->enhanced_stats_en;
DP_PEER_MC_INCC_PKT(peer, 1, msdu_len, enh_flag); DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
tid_stats->mcast_msdu_cnt++; tid_stats->mcast_msdu_cnt++;
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
DP_PEER_BC_INCC_PKT(peer, 1, msdu_len, enh_flag); DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
tid_stats->bcast_msdu_cnt++; tid_stats->bcast_msdu_cnt++;
} }
} }
/* txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();
* currently we can return from here as we have similar stats
* updated at per ppdu level instead of msdu level
*/
if (!soc->process_rx_status)
return;
peer->stats.rx.last_rx_ts = qdf_system_ticks(); dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
/*
* TODO - For KIWI this field is present in ring_desc
* Try to use ring desc instead of tlv.
*/
is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
tid = qdf_nbuf_get_tid_val(nbuf);
bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
rx_tlv_hdr);
nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
DP_STATS_INC(peer, rx.bw[bw], 1);
/*
* only if nss > 0 and pkt_type is 11N/AC/AX,
* then increase index [nss - 1] in array counter.
*/
if (nss > 0 && (pkt_type == DOT11_N ||
pkt_type == DOT11_AC ||
pkt_type == DOT11_AX))
DP_STATS_INC(peer, rx.nss[nss - 1], 1);
DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
DP_STATS_INCC(peer, rx.err.mic_err, 1,
hal_rx_tlv_mic_err_get(soc->hal_soc, rx_tlv_hdr));
DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
hal_rx_tlv_decrypt_err_get(soc->hal_soc, rx_tlv_hdr));
DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
} }
#ifndef WDS_VENDOR_EXTENSION #ifndef WDS_VENDOR_EXTENSION

View File

@@ -263,10 +263,11 @@ bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
{ {
if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi && if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) { qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
DP_STATS_INC(ta_txrx_peer, rx.intra_bss.mdns_no_fwd, 1); DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
rx.intra_bss.mdns_no_fwd, 1);
return false; return false;
} }
return true; return true;
} }
#else #else
static inline static inline

View File

@@ -1404,7 +1404,8 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
if ((!soc->mec_fw_offload) && if ((!soc->mec_fw_offload) &&
dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) { dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
/* this is a looped back MCBC pkt, drop it */ /* this is a looped back MCBC pkt, drop it */
DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf; goto drop_nbuf;
} }
@@ -1414,7 +1415,8 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
* from any proxysta. * from any proxysta.
*/ */
if (check_qwrap_multicast_loopback(vdev, nbuf)) { if (check_qwrap_multicast_loopback(vdev, nbuf)) {
DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf)); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf; goto drop_nbuf;
} }
@@ -1423,13 +1425,13 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
rx_tlv_hdr))) { rx_tlv_hdr))) {
dp_err_rl("free buffer for multicast packet"); dp_err_rl("free buffer for multicast packet");
DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
goto drop_nbuf; goto drop_nbuf;
} }
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) { if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_err_rl("mcast Policy Check Drop pkt"); dp_err_rl("mcast Policy Check Drop pkt");
DP_STATS_INC(peer, rx.policy_check_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
goto drop_nbuf; goto drop_nbuf;
} }
/* WDS Source Port Learning */ /* WDS Source Port Learning */
@@ -1440,6 +1442,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
msdu_metadata); msdu_metadata);
if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) { if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
struct dp_peer *peer;
tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr); tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
DP_MOD_ID_RX_ERR); DP_MOD_ID_RX_ERR);
@@ -1510,7 +1513,8 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
enh_flag); enh_flag);
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
DP_PEER_BC_INCC_PKT(peer, 1, qdf_nbuf_len(nbuf), DP_PEER_BC_INCC_PKT(txrx_peer, 1,
qdf_nbuf_len(nbuf),
enh_flag); enh_flag);
} }
@@ -1669,10 +1673,11 @@ process_rx:
eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf); eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
is_broadcast = (QDF_IS_ADDR_BROADCAST is_broadcast = (QDF_IS_ADDR_BROADCAST
(eh->ether_dhost)) ? 1 : 0 ; (eh->ether_dhost)) ? 1 : 0 ;
DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf)); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
qdf_nbuf_len(nbuf));
if (is_broadcast) { if (is_broadcast) {
DP_STATS_INC_PKT(peer, rx.bcast, 1, DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
qdf_nbuf_len(nbuf)); qdf_nbuf_len(nbuf));
} }
} }
@@ -1684,7 +1689,7 @@ process_rx:
EXCEPTION_DEST_RING_ID, true, true); EXCEPTION_DEST_RING_ID, true, true);
/* Update the flow tag in SKB based on FSE metadata */ /* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
DP_STATS_FLAT_INC(txrx_peer, to_stack.num, 1); DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
qdf_nbuf_set_exc_frame(nbuf, 1); qdf_nbuf_set_exc_frame(nbuf, 1);
dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL); dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
} }
@@ -2760,10 +2765,10 @@ done:
/* TODO */ /* TODO */
/* Add per error code accounting */ /* Add per error code accounting */
case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP: case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
if (peer) if (txrx_peer)
DP_STATS_INC(peer, DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.err.jump_2k_err, rx.err.jump_2k_err,
1); 1);
pool_id = wbm_err_info.pool_id; pool_id = wbm_err_info.pool_id;
@@ -2782,8 +2787,9 @@ done:
break; break;
case HAL_REO_ERR_REGULAR_FRAME_OOR: case HAL_REO_ERR_REGULAR_FRAME_OOR:
if (txrx_peer) if (txrx_peer)
DP_STATS_INC(peer, DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.err.oor_err, 1); rx.err.oor_err,
1);
if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
rx_tlv_hdr)) { rx_tlv_hdr)) {
tid = tid =
@@ -2811,8 +2817,9 @@ done:
case HAL_REO_ERR_PN_CHECK_FAILED: case HAL_REO_ERR_PN_CHECK_FAILED:
case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET: case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
if (txrx_peer) if (txrx_peer)
DP_STATS_INC(txrx_peer, DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.err.pn_err, 1); rx.err.pn_err,
1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
break; break;
@@ -2851,10 +2858,10 @@ done:
case HAL_RXDMA_ERR_UNENCRYPTED: case HAL_RXDMA_ERR_UNENCRYPTED:
case HAL_RXDMA_ERR_WIFI_PARSE: case HAL_RXDMA_ERR_WIFI_PARSE:
if (peer) if (txrx_peer)
DP_STATS_INC(peer, DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.err.rxdma_wifi_parse_err, rx.err.rxdma_wifi_parse_err,
1); 1);
pool_id = wbm_err_info.pool_id; pool_id = wbm_err_info.pool_id;
dp_rx_process_rxdma_err(soc, nbuf, dp_rx_process_rxdma_err(soc, nbuf,
@@ -2870,14 +2877,17 @@ done:
rx_tlv_hdr, rx_tlv_hdr,
txrx_peer); txrx_peer);
if (txrx_peer) if (txrx_peer)
DP_STATS_INC(peer, rx.err.mic_err, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.err.mic_err,
1);
break; break;
case HAL_RXDMA_ERR_DECRYPT: case HAL_RXDMA_ERR_DECRYPT:
if (txrx_peer) { if (txrx_peer) {
DP_STATS_INC(peer, rx.err. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
decrypt_err, 1); rx.err.decrypt_err,
1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
break; break;
} }
@@ -2896,8 +2906,10 @@ done:
pool_id); pool_id);
break; break;
case HAL_RXDMA_MULTICAST_ECHO: case HAL_RXDMA_MULTICAST_ECHO:
DP_STATS_INC_PKT(peer, rx.mec_drop, 1, if (txrx_peer)
qdf_nbuf_len(nbuf)); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
break; break;
case HAL_RXDMA_UNAUTHORIZED_WDS: case HAL_RXDMA_UNAUTHORIZED_WDS:

View File

@@ -4458,17 +4458,18 @@ static QDF_STATUS dp_peer_stats_update_protocol_test_cnt(struct dp_vdev *vdev,
void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev, void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
qdf_nbuf_t nbuf, qdf_nbuf_t nbuf,
struct dp_peer *peer, struct dp_txrx_peer *txrx_peer,
bool is_egress, bool is_egress,
bool is_rx) bool is_rx)
{ {
struct cdp_peer_stats *peer_stats; struct dp_peer_per_pkt_stats *per_pkt_stats;
struct protocol_trace_count *protocol_trace_cnt; struct protocol_trace_count *protocol_trace_cnt;
enum cdp_protocol_trace prot; enum cdp_protocol_trace prot;
struct dp_soc *soc; struct dp_soc *soc;
struct ether_header *eh; struct ether_header *eh;
char *mac; char *mac;
bool new_peer_ref = false; bool new_peer_ref = false;
struct dp_peer *peer = NULL;
if (qdf_likely(!vdev->peer_protocol_count_track)) if (qdf_likely(!vdev->peer_protocol_count_track))
return; return;
@@ -4485,14 +4486,18 @@ void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
else else
mac = eh->ether_dhost; mac = eh->ether_dhost;
if (!peer) { if (!txrx_peer) {
peer = dp_peer_find_hash_find(soc, mac, 0, vdev->vdev_id, peer = dp_peer_find_hash_find(soc, mac, 0, vdev->vdev_id,
DP_MOD_ID_GENERIC_STATS); DP_MOD_ID_GENERIC_STATS);
new_peer_ref = true; new_peer_ref = true;
if (!peer) if (!peer)
return; return;
txrx_peer = peer->txrx_peer;
if (!txrx_peer)
goto dp_vdev_peer_stats_update_protocol_cnt_free_peer;
} }
peer_stats = &peer->stats; per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
if (qdf_nbuf_is_icmp_pkt(nbuf) == true) if (qdf_nbuf_is_icmp_pkt(nbuf) == true)
prot = CDP_TRACE_ICMP; prot = CDP_TRACE_ICMP;
@@ -4504,9 +4509,9 @@ void dp_vdev_peer_stats_update_protocol_cnt(struct dp_vdev *vdev,
goto dp_vdev_peer_stats_update_protocol_cnt_free_peer; goto dp_vdev_peer_stats_update_protocol_cnt_free_peer;
if (is_rx) if (is_rx)
protocol_trace_cnt = peer_stats->rx.protocol_trace_cnt; protocol_trace_cnt = per_pkt_stats->rx.protocol_trace_cnt;
else else
protocol_trace_cnt = peer_stats->tx.protocol_trace_cnt; protocol_trace_cnt = per_pkt_stats->tx.protocol_trace_cnt;
if (is_egress) if (is_egress)
protocol_trace_cnt[prot].egress_cnt++; protocol_trace_cnt[prot].egress_cnt++;

View File

@@ -3151,8 +3151,9 @@ void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
*/ */
if (sa_peer_id == peer->peer_id) { if (sa_peer_id == peer->peer_id) {
dp_tx_debug("multicast packet"); dp_tx_debug("multicast packet");
DP_STATS_INC(peer, tx.nawds_mcast_drop, DP_PEER_PER_PKT_STATS_INC(txrx_peer,
1); tx.nawds_mcast_drop,
1);
continue; continue;
} }
@@ -3173,9 +3174,11 @@ void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
dp_tx_debug("pkt send failed"); dp_tx_debug("pkt send failed");
qdf_nbuf_free(nbuf_clone); qdf_nbuf_free(nbuf_clone);
} else { } else {
if (peer_id != DP_INVALID_PEER) if (peer_id != DP_INVALID_PEER) {
DP_STATS_INC_PKT(peer, tx.nawds_mcast, DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
1, qdf_nbuf_len(nbuf)); tx.nawds_mcast,
1, qdf_nbuf_len(nbuf));
}
} }
} }
} }
@@ -3788,7 +3791,8 @@ void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
#ifdef DISABLE_DP_STATS #ifdef DISABLE_DP_STATS
static static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *peer) inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
struct dp_txrx_peer *txrx_peer)
{ {
} }
#else #else
@@ -3799,7 +3803,81 @@ dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype)); DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
if (subtype != QDF_PROTO_INVALID) if (subtype != QDF_PROTO_INVALID)
DP_STATS_INC(peer, tx.no_ack_count[subtype], 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
1);
}
#endif
#ifndef QCA_ENHANCED_STATS_SUPPORT
/**
* dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
*
* @ts: Tx compltion status
* @txrx_peer: datapath txrx_peer handle
*
* Return: void
*/
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
struct dp_txrx_peer *txrx_peer)
{
uint8_t mcs, pkt_type;
mcs = ts->mcs;
pkt_type = ts->pkt_type;
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
DP_PEER_EXTD_STATS_INCC(txrx_peer,
tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
DP_PEER_EXTD_STATS_INC(txrx_peer,
tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
if (ts->first_msdu) {
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
ts->transmit_cnt > 1);
DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
qdf_do_div(ts->transmit_cnt, DP_RETRY_COUNT),
ts->transmit_cnt > DP_RETRY_COUNT);
}
}
#else
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
struct dp_txrx_peer *txrx_peer)
{
} }
#endif #endif
@@ -3821,7 +3899,6 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
{ {
struct dp_pdev *pdev = txrx_peer->vdev->pdev; struct dp_pdev *pdev = txrx_peer->vdev->pdev;
struct dp_soc *soc = NULL; struct dp_soc *soc = NULL;
uint8_t mcs, pkt_type;
uint8_t tid = ts->tid; uint8_t tid = ts->tid;
uint32_t length; uint32_t length;
struct cdp_tid_tx_stats *tid_stats; struct cdp_tid_tx_stats *tid_stats;
@@ -3835,38 +3912,35 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid]; tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
soc = pdev->soc; soc = pdev->soc;
mcs = ts->mcs;
pkt_type = ts->pkt_type;
if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) { if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
dp_err("Release source is not from TQM"); dp_err("Release source is not from TQM");
return; return;
} }
length = qdf_nbuf_len(tx_desc->nbuf); length = qdf_nbuf_len(tx_desc->nbuf);
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
if (qdf_unlikely(pdev->delay_stats_flag)) if (qdf_unlikely(pdev->delay_stats_flag))
dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id); dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
DP_STATS_INCC(peer, tx.dropped.age_out, 1, DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.age_out, 1,
(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED)); (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length, DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, tx.dropped.fw_rem, 1, length,
(ts->status == HAL_TX_TQM_RR_REM_CMD_REM)); (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1, DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_rem_notx, 1,
(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX)); (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1, DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_rem_tx, 1,
(ts->status == HAL_TX_TQM_RR_REM_CMD_TX)); (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1, DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_reason1, 1,
(ts->status == HAL_TX_TQM_RR_FW_REASON1)); (ts->status == HAL_TX_TQM_RR_FW_REASON1));
DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1, DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_reason2, 1,
(ts->status == HAL_TX_TQM_RR_FW_REASON2)); (ts->status == HAL_TX_TQM_RR_FW_REASON2));
DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1, DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.dropped.fw_reason3, 1,
(ts->status == HAL_TX_TQM_RR_FW_REASON3)); (ts->status == HAL_TX_TQM_RR_FW_REASON3));
/* /*
* tx_failed is ideally supposed to be updated from HTT ppdu completion * tx_failed is ideally supposed to be updated from HTT ppdu completion
@@ -3876,73 +3950,40 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
* then this has to be removed * then this has to be removed
*/ */
txrx_peer->tx_failed = txrx_peer->tx_failed =
peer->stats.tx.dropped.fw_rem.num + txrx_peer->stats.per_pkt_stats.tx.dropped.fw_rem.num +
peer->stats.tx.dropped.fw_rem_notx + txrx_peer->stats.per_pkt_stats.tx.dropped.fw_rem_notx +
peer->stats.tx.dropped.fw_rem_tx + txrx_peer->stats.per_pkt_stats.tx.dropped.fw_rem_tx +
peer->stats.tx.dropped.age_out + txrx_peer->stats.per_pkt_stats.tx.dropped.age_out +
peer->stats.tx.dropped.fw_reason1 + txrx_peer->stats.per_pkt_stats.tx.dropped.fw_reason1 +
peer->stats.tx.dropped.fw_reason2 + txrx_peer->stats.per_pkt_stats.tx.dropped.fw_reason2 +
peer->stats.tx.dropped.fw_reason3; txrx_peer->stats.per_pkt_stats.tx.dropped.fw_reason3;
if (ts->status < CDP_MAX_TX_TQM_STATUS) { if (ts->status < CDP_MAX_TX_TQM_STATUS) {
tid_stats->tqm_status_cnt[ts->status]++; tid_stats->tqm_status_cnt[ts->status]++;
} }
if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) { if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
DP_STATS_INCC(peer, tx.failed_retry_count, 1, DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
ts->transmit_cnt > DP_RETRY_COUNT); ts->transmit_cnt > DP_RETRY_COUNT);
dp_update_no_ack_stats(tx_desc->nbuf, peer); dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
return; return;
} }
DP_STATS_INCC(peer, tx.retry_count, 1, ts->transmit_cnt > 1); DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
ts->transmit_cnt > 1);
DP_STATS_INCC(peer, tx.multiple_retry_count, 1, ts->transmit_cnt > 2); DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count, 1,
ts->transmit_cnt > 2);
DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma); DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu); DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu); ts->msdu_part_of_amsdu);
/* DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
* Following Rate Statistics are updated from HTT PPDU events from FW. !ts->msdu_part_of_amsdu);
* Return from here if HTT PPDU events are enabled.
*/
if (!(soc->process_tx_status))
return;
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1); txrx_peer->stats.per_pkt_stats.tx.last_tx_ts = qdf_system_ticks();
DP_STATS_INC(peer, tx.bw[ts->bw], 1);
DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi); dp_tx_update_peer_extd_stats(ts, txrx_peer);
DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
if (ts->first_msdu) {
DP_STATS_INCC(peer, tx.retries_mpdu, 1, ts->transmit_cnt > 1);
DP_STATS_INCC(peer, tx.mpdu_success_with_retries,
qdf_do_div(ts->transmit_cnt, DP_RETRY_COUNT),
ts->transmit_cnt > DP_RETRY_COUNT);
}
peer->stats.tx.last_tx_ts = qdf_system_ticks();
} }
#ifdef QCA_LL_TX_FLOW_CONTROL_V2 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
@@ -4055,6 +4096,8 @@ static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
{ {
uint64_t delta_ms; uint64_t delta_ms;
struct cdp_tx_sojourn_stats *sojourn_stats; struct cdp_tx_sojourn_stats *sojourn_stats;
struct dp_peer *primary_link_peer = NULL;
struct dp_soc *link_peer_soc = NULL;
if (qdf_unlikely(!pdev->enhanced_stats_en)) if (qdf_unlikely(!pdev->enhanced_stats_en))
return; return;
@@ -4066,29 +4109,41 @@ static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
if (qdf_unlikely(!pdev->sojourn_buf)) if (qdf_unlikely(!pdev->sojourn_buf))
return; return;
primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
txrx_peer->peer_id,
DP_MOD_ID_TX_COMP);
if (qdf_unlikely(!primary_link_peer))
return;
sojourn_stats = (struct cdp_tx_sojourn_stats *) sojourn_stats = (struct cdp_tx_sojourn_stats *)
qdf_nbuf_data(pdev->sojourn_buf); qdf_nbuf_data(pdev->sojourn_buf);
sojourn_stats->cookie = (void *)peer->rdkstats_ctx; link_peer_soc = primary_link_peer->vdev->pdev->soc;
sojourn_stats->cookie = (void *)
dp_monitor_peer_get_rdkstats_ctx(link_peer_soc,
primary_link_peer);
delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) - delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
txdesc_ts; txdesc_ts;
qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid], qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
delta_ms); delta_ms);
sojourn_stats->sum_sojourn_msdu[tid] = delta_ms; sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
sojourn_stats->num_msdus[tid] = 1; sojourn_stats->num_msdus[tid] = 1;
sojourn_stats->avg_sojourn_msdu[tid].internal = sojourn_stats->avg_sojourn_msdu[tid].internal =
peer->avg_sojourn_msdu[tid].internal; txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc, dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
pdev->sojourn_buf, HTT_INVALID_PEER, pdev->sojourn_buf, HTT_INVALID_PEER,
WDI_NO_VAL, pdev->pdev_id); WDI_NO_VAL, pdev->pdev_id);
sojourn_stats->sum_sojourn_msdu[tid] = 0; sojourn_stats->sum_sojourn_msdu[tid] = 0;
sojourn_stats->num_msdus[tid] = 0; sojourn_stats->num_msdus[tid] = 0;
sojourn_stats->avg_sojourn_msdu[tid].internal = 0; sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
} }
#else #else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev, static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
struct dp_peer *peer, struct dp_txrx_peer *txrx_peer,
uint8_t tid, uint8_t tid,
uint64_t txdesc_ts, uint64_t txdesc_ts,
uint32_t ppdu_id) uint32_t ppdu_id)
@@ -4445,22 +4500,26 @@ void dp_tx_comp_process_tx_status(struct dp_soc *soc,
if (qdf_unlikely(txrx_peer->bss_peer && if (qdf_unlikely(txrx_peer->bss_peer &&
vdev->opmode == wlan_op_mode_ap)) { vdev->opmode == wlan_op_mode_ap)) {
if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) { if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
DP_STATS_INC_PKT(peer, tx.mcast, 1, length); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
length);
if (txrx_peer->vdev->tx_encap_type == if (txrx_peer->vdev->tx_encap_type ==
htt_cmn_pkt_type_ethernet && htt_cmn_pkt_type_ethernet &&
QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) { QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
DP_STATS_INC_PKT(peer, tx.bcast, 1, length); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
tx.bcast, 1,
length);
} }
} }
} else { } else {
DP_STATS_INC_PKT(peer, tx.ucast, 1, length); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) { if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
DP_STATS_INC_PKT(peer, tx.tx_success, 1, length); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
1, length);
if (qdf_unlikely(txrx_peer->in_twt)) { if (qdf_unlikely(txrx_peer->in_twt)) {
DP_STATS_INC_PKT(peer, DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
tx.tx_success_twt, tx.tx_success_twt,
1, length); 1, length);
} }
} }
} }
@@ -4483,7 +4542,7 @@ out:
defined(QCA_ENHANCED_STATS_SUPPORT) defined(QCA_ENHANCED_STATS_SUPPORT)
/* /*
* dp_tx_update_peer_basic_stats(): Update peer basic stats * dp_tx_update_peer_basic_stats(): Update peer basic stats
* @peer: Datapath peer handle * @txrx_peer: Datapath txrx_peer handle
* @length: Length of the packet * @length: Length of the packet
* @tx_status: Tx status from TQM/FW * @tx_status: Tx status from TQM/FW
* @update: enhanced flag value present in dp_pdev * @update: enhanced flag value present in dp_pdev
@@ -4495,9 +4554,10 @@ void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
bool update) bool update)
{ {
if ((!txrx_peer->hw_txrx_stats_en) || update) { if ((!txrx_peer->hw_txrx_stats_en) || update) {
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED); if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
} }
} }
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
@@ -4506,9 +4566,10 @@ void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
bool update) bool update)
{ {
if (!peer->hw_txrx_stats_en) { if (!peer->hw_txrx_stats_en) {
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED); if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
} }
} }
@@ -4517,9 +4578,10 @@ void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
uint32_t length, uint8_t tx_status, uint32_t length, uint8_t tx_status,
bool update) bool update)
{ {
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length); DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1,
tx_status != HAL_TX_TQM_RR_FRAME_ACKED); if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
} }
#endif #endif

View File

@@ -740,7 +740,9 @@ done:
if (qdf_nbuf_is_raw_frame(nbuf)) { if (qdf_nbuf_is_raw_frame(nbuf)) {
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len); DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.raw, 1,
msdu_len);
} else { } else {
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
DP_STATS_INC(soc, rx.err.scatter_msdu, 1); DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
@@ -765,7 +767,9 @@ done:
if (qdf_unlikely(vdev->multipass_en)) { if (qdf_unlikely(vdev->multipass_en)) {
if (dp_rx_multipass_process(txrx_peer, nbuf, if (dp_rx_multipass_process(txrx_peer, nbuf,
tid) == false) { tid) == false) {
DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.multipass_rx_pkt_drop,
1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
nbuf = next; nbuf = next;
continue; continue;
@@ -774,7 +778,8 @@ done:
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) { if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_rx_err("%pK: Policy Check Drop pkt", soc); dp_rx_err("%pK: Policy Check Drop pkt", soc);
DP_STATS_INC(peer, rx.policy_check_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.policy_check_drop, 1);
tid_stats->fail_cnt[POLICY_CHECK_DROP]++; tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
/* Drop & free packet */ /* Drop & free packet */
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
@@ -789,7 +794,8 @@ done:
rx_tlv_hdr) == rx_tlv_hdr) ==
false))) { false))) {
tid_stats->fail_cnt[NAWDS_MCAST_DROP]++; tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.nawds_mcast_drop, 1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
nbuf = next; nbuf = next;
continue; continue;
@@ -805,8 +811,9 @@ done:
qdf_nbuf_is_ipv4_wapi_pkt(nbuf); qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
if (!is_eapol) { if (!is_eapol) {
DP_STATS_INC(peer, DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.peer_unauth_rx_pkt_drop, 1); rx.peer_unauth_rx_pkt_drop,
1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
nbuf = next; nbuf = next;
continue; continue;
@@ -867,8 +874,9 @@ done:
rx_tlv_hdr, rx_tlv_hdr,
nbuf))) { nbuf))) {
/* this is a looped back MCBC pkt,drop it */ /* this is a looped back MCBC pkt,drop it */
DP_STATS_INC_PKT(peer, rx.mec_drop, 1, DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
QDF_NBUF_CB_RX_PKT_LEN(nbuf)); rx.mec_drop, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
nbuf = next; nbuf = next;
continue; continue;
@@ -904,11 +912,12 @@ done:
DP_RX_LIST_APPEND(deliver_list_head, DP_RX_LIST_APPEND(deliver_list_head,
deliver_list_tail, deliver_list_tail,
nbuf); nbuf);
DP_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1, DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, to_stack, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf)); QDF_NBUF_CB_RX_PKT_LEN(nbuf));
if (qdf_unlikely(txrx_peer->in_twt)) if (qdf_unlikely(txrx_peer->in_twt))
DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
QDF_NBUF_CB_RX_PKT_LEN(nbuf)); rx.to_stack_twt, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
tid_stats->delivered_to_stack++; tid_stats->delivered_to_stack++;
nbuf = next; nbuf = next;

View File

@@ -193,10 +193,10 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
&txrx_ref_handle, &txrx_ref_handle,
DP_MOD_ID_HTT_COMP); DP_MOD_ID_HTT_COMP);
if (qdf_likely(txrx_peer)) { if (qdf_likely(txrx_peer)) {
DP_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1,
qdf_nbuf_len(tx_desc->nbuf)); qdf_nbuf_len(tx_desc->nbuf));
DP_STATS_FLAT_INC(txrx_peer, tx_failed, 1, if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
tx_status != HTT_TX_FW2WBM_TX_STATUS_OK); DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
} }
dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer, dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,