qcacmn: Add support for stats in IPA Architecture

Add support for stats in IPA Architecture.

Change-Id: I38706503719f6236fa1081342e9eb1f0bd0ba1c2
CRs-Fixed: 3345292
Cette révision appartient à :
Amrit
2022-11-22 14:57:04 +05:30
révisé par Madan Koyyalamudi
Parent 266f87715e
révision 1833aa0e7f
9 fichiers modifiés avec 358 ajouts et 7 suppressions

Voir le fichier

@@ -1431,6 +1431,9 @@ struct protocol_trace_count {
* @rts_failure: RTS failure count
* @bar_cnt: Block ACK Request frame count
* @ndpa_cnt: NDP announcement frame count
* @wme_ac_type_bytes: Wireless Multimedia Type Bytes Count
* @tx_ucast_total: Total tx unicast count
* @tx_ucast_success: Total tx unicast success count
*/
struct cdp_tx_stats {
struct cdp_pkt_info comp_pkt;
@@ -1554,6 +1557,9 @@ struct cdp_tx_stats {
uint32_t rts_failure;
uint32_t bar_cnt;
uint32_t ndpa_cnt;
uint64_t wme_ac_type_bytes[WME_AC_MAX];
struct cdp_pkt_info tx_ucast_total;
struct cdp_pkt_info tx_ucast_success;
};
/* struct cdp_rx_stats - rx Level Stats
@@ -1645,6 +1651,8 @@ struct cdp_tx_stats {
* @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured BW
* @bar_cnt: Block ACK Request frame count
* @ndpa_cnt: NDP announcement frame count
* @wme_ac_type_bytes: Wireless Multimedia type Byte Count
* @rx_total: Total rx count
*/
struct cdp_rx_stats {
struct cdp_pkt_info to_stack;
@@ -1736,6 +1744,10 @@ struct cdp_rx_stats {
uint32_t mcast_3addr_drop;
uint32_t bar_cnt;
uint32_t ndpa_cnt;
uint64_t wme_ac_type_bytes[WME_AC_MAX];
#ifdef IPA_OFFLOAD
struct cdp_pkt_info rx_total;
#endif
};
/* struct cdp_tx_ingress_stats - Tx ingress Stats

Voir le fichier

@@ -1163,6 +1163,11 @@ void DP_PRINT_STATS(const char *fmt, ...);
DP_PEER_PER_PKT_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}
/*
 * Overwrite (not increment) the per-packet peer stat @_field with @_delta.
 * Thin wrapper that routes through DP_STATS_UPD against the peer's
 * per_pkt_stats sub-structure.
 */
#define DP_PEER_PER_PKT_STATS_UPD(_handle, _field, _delta) \
{ \
DP_STATS_UPD(_handle, per_pkt_stats._field, _delta); \
}
#ifndef QCA_ENHANCED_STATS_SUPPORT
#define DP_PEER_EXTD_STATS_INC(_handle, _field, _delta) \
{ \
@@ -1629,6 +1634,23 @@ void dp_update_vdev_stats(struct dp_soc *soc,
void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
struct dp_peer *peer);
#ifdef IPA_OFFLOAD
/*
 * DP_IPA_UPDATE_RX_STATS() - aggregate the IPA rx_total packet counters
 * from __srcobj into __tgtobj via the generic pkt aggregation helper.
 */
#define DP_IPA_UPDATE_RX_STATS(__tgtobj, __srcobj) \
{ \
	DP_STATS_AGGR_PKT(__tgtobj, __srcobj, rx.rx_total); \
}

/*
 * DP_IPA_UPDATE_PER_PKT_RX_STATS() - fold the per-packet rx_total num/bytes
 * counters of __srcobj into __tgtobj.
 */
#define DP_IPA_UPDATE_PER_PKT_RX_STATS(__tgtobj, __srcobj) \
{ \
	(__tgtobj)->rx.rx_total.num += (__srcobj)->rx.rx_total.num; \
	(__tgtobj)->rx.rx_total.bytes += (__srcobj)->rx.rx_total.bytes; \
}
#else
/*
 * IPA stats disabled: both helpers compile away to nothing.
 * NOTE: the first #define previously ended in a stray line-continuation
 * backslash, which spliced the second #define into its replacement list
 * and made the '#' token a constraint violation (C11 6.10.3.2) — removed.
 */
#define DP_IPA_UPDATE_PER_PKT_RX_STATS(tgtobj, srcobj)
#define DP_IPA_UPDATE_RX_STATS(tgtobj, srcobj)
#endif
#define DP_UPDATE_STATS(_tgtobj, _srcobj) \
do { \
uint8_t i; \
@@ -1654,6 +1676,14 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
for (i = 0; i < WME_AC_MAX; i++) { \
DP_STATS_AGGR(_tgtobj, _srcobj, tx.wme_ac_type[i]); \
DP_STATS_AGGR(_tgtobj, _srcobj, rx.wme_ac_type[i]); \
DP_STATS_AGGR(_tgtobj, _srcobj, \
tx.wme_ac_type_bytes[i]); \
DP_STATS_AGGR(_tgtobj, _srcobj, \
rx.wme_ac_type_bytes[i]); \
DP_STATS_AGGR(_tgtobj, _srcobj, \
tx.wme_ac_type_bytes[i]); \
DP_STATS_AGGR(_tgtobj, _srcobj, \
rx.wme_ac_type_bytes[i]); \
DP_STATS_AGGR(_tgtobj, _srcobj, tx.excess_retries_per_ac[i]); \
\
} \
@@ -1698,6 +1728,8 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.mcast_vdev_drop); \
DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.invalid_rr); \
DP_STATS_AGGR(_tgtobj, _srcobj, tx.dropped.age_out); \
DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_total); \
DP_STATS_AGGR_PKT(_tgtobj, _srcobj, tx.tx_ucast_success); \
\
DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.mic_err); \
DP_STATS_AGGR(_tgtobj, _srcobj, rx.err.decrypt_err); \
@@ -1741,6 +1773,7 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
DP_STATS_AGGR(_tgtobj, _srcobj, rx.multipass_rx_pkt_drop); \
DP_STATS_AGGR(_tgtobj, _srcobj, rx.peer_unauth_rx_pkt_drop); \
DP_STATS_AGGR(_tgtobj, _srcobj, rx.policy_check_drop); \
DP_IPA_UPDATE_RX_STATS(_tgtobj, _srcobj); \
} while (0)
#ifdef VDEV_PEER_PROTOCOL_COUNT
@@ -1912,6 +1945,7 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
_tgtobj->rx.rx_lmac[i].bytes += \
_srcobj->rx.rx_lmac[i].bytes; \
} \
DP_IPA_UPDATE_PER_PKT_RX_STATS(_tgtobj, _srcobj); \
DP_UPDATE_PROTOCOL_COUNT_STATS(_tgtobj, _srcobj); \
} while (0)
@@ -1961,6 +1995,8 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
} \
for (i = 0; i < WME_AC_MAX; i++) { \
_tgtobj->tx.wme_ac_type[i] += _srcobj->tx.wme_ac_type[i]; \
_tgtobj->tx.wme_ac_type_bytes[i] += \
_srcobj->tx.wme_ac_type_bytes[i]; \
_tgtobj->tx.excess_retries_per_ac[i] += \
_srcobj->tx.excess_retries_per_ac[i]; \
} \
@@ -1992,6 +2028,14 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
for (i = 0; i < MAX_MU_GROUP_ID; i++) { \
_tgtobj->tx.mu_group_id[i] = _srcobj->tx.mu_group_id[i]; \
} \
_tgtobj->tx.tx_ucast_total.num += \
_srcobj->tx.tx_ucast_total.num;\
_tgtobj->tx.tx_ucast_total.bytes += \
_srcobj->tx.tx_ucast_total.bytes;\
_tgtobj->tx.tx_ucast_success.num += \
_srcobj->tx.tx_ucast_success.num; \
_tgtobj->tx.tx_ucast_success.bytes += \
_srcobj->tx.tx_ucast_success.bytes; \
\
_tgtobj->rx.mpdu_cnt_fcs_ok += _srcobj->rx.mpdu_cnt_fcs_ok; \
_tgtobj->rx.mpdu_cnt_fcs_err += _srcobj->rx.mpdu_cnt_fcs_err; \
@@ -2025,6 +2069,8 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
} \
for (i = 0; i < WME_AC_MAX; i++) { \
_tgtobj->rx.wme_ac_type[i] += _srcobj->rx.wme_ac_type[i]; \
_tgtobj->rx.wme_ac_type_bytes[i] += \
_srcobj->rx.wme_ac_type_bytes[i]; \
} \
for (i = 0; i < MAX_MCS; i++) { \
_tgtobj->rx.su_ax_ppdu_cnt.mcs_count[i] += \
@@ -2379,6 +2425,13 @@ void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
union hal_reo_status *reo_status);
int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
dp_rxtid_stats_cmd_cb dp_stats_cmd_cb);
#ifdef QCA_ENHANCED_STATS_SUPPORT
void dp_peer_aggregate_tid_stats(struct dp_peer *peer);
#endif
#else
static inline void dp_peer_aggregate_tid_stats(struct dp_peer *peer)
{
}
#endif
QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
@@ -4334,4 +4387,8 @@ void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
* Return: none
*/
void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc);
void dp_get_peer_stats(struct dp_peer *peer,
struct cdp_peer_stats *peer_stats);
#endif /* #ifndef _DP_INTERNAL_H_ */

Voir le fichier

@@ -3797,5 +3797,245 @@ QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc,
return QDF_STATUS_SUCCESS;
}
/**
* dp_peer_aggregate_tid_stats - aggregate rx tid stats
* @peer: Data Path peer
*
* Return: void
*/
void
dp_peer_aggregate_tid_stats(struct dp_peer *peer)
{
uint8_t i = 0;
struct dp_rx_tid *rx_tid = NULL;
struct cdp_pkt_info rx_total = {0};
struct dp_txrx_peer *txrx_peer = NULL;
if (!peer->rx_tid)
return;
txrx_peer = dp_get_txrx_peer(peer);
if (!txrx_peer)
return;
for (i = 0; i < DP_MAX_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_total.num += rx_tid->rx_msdu_cnt.num;
rx_total.bytes += rx_tid->rx_msdu_cnt.bytes;
}
DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.num,
rx_total.num);
DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.bytes,
rx_total.bytes);
}
/**
 * dp_ipa_update_vdev_stats() - per-peer iterator callback that folds one
 *	peer's stats into a vdev stats buffer, refreshing TID totals first
 * @soc: soc handle
 * @srcobj: DP peer currently being visited
 * @arg: opaque pointer to the cdp_vdev_stats accumulation buffer
 *
 * Return: void
 */
static inline
void dp_ipa_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
			      void *arg)
{
	/* Refresh rx_total from the per-TID counters before aggregating */
	dp_peer_aggregate_tid_stats(srcobj);
	dp_update_vdev_stats(soc, srcobj, arg);
}
/**
 * dp_ipa_aggregate_vdev_stats() - Aggregate vdev stats for IPA
 * @vdev: Data path vdev
 * @vdev_stats: buffer that receives the aggregated stats
 *
 * Collects ingress and per-peer stats into @vdev_stats, then derives the
 * generic ucast/tx_success tx fields from the IPA-specific unicast totals
 * and the rx unicast/to_stack fields from the IPA rx_total counters.
 *
 * Fix over previous revision: the local 'soc' variable was assigned from
 * vdev->pdev->soc but never used — removed.
 *
 * Return: void
 */
static inline
void dp_ipa_aggregate_vdev_stats(struct dp_vdev *vdev,
				 struct cdp_vdev_stats *vdev_stats)
{
	if (!vdev || !vdev->pdev)
		return;

	dp_update_vdev_ingress_stats(vdev);
	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
	dp_vdev_iterate_peer(vdev, dp_ipa_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);

	/* Mirror the IPA unicast totals into the generic tx fields */
	vdev_stats->tx.ucast.num = vdev_stats->tx.tx_ucast_total.num;
	vdev_stats->tx.ucast.bytes = vdev_stats->tx.tx_ucast_total.bytes;
	vdev_stats->tx.tx_success.num = vdev_stats->tx.tx_ucast_success.num;
	vdev_stats->tx.tx_success.bytes =
				vdev_stats->tx.tx_ucast_success.bytes;

	/* unicast = total - multicast, guarded against unsigned underflow */
	if (vdev_stats->rx.rx_total.num >= vdev_stats->rx.multicast.num)
		vdev_stats->rx.unicast.num =
			vdev_stats->rx.rx_total.num -
			vdev_stats->rx.multicast.num;
	if (vdev_stats->rx.rx_total.bytes >= vdev_stats->rx.multicast.bytes)
		vdev_stats->rx.unicast.bytes =
			vdev_stats->rx.rx_total.bytes -
			vdev_stats->rx.multicast.bytes;

	vdev_stats->rx.to_stack.num = vdev_stats->rx.rx_total.num;
	vdev_stats->rx.to_stack.bytes = vdev_stats->rx.rx_total.bytes;
}
/**
 * dp_ipa_aggregate_pdev_stats() - Aggregate pdev stats for IPA
 * @pdev: Data path pdev; assumed non-NULL by callers — TODO confirm
 *
 * Resets the pdev tx/rx/ingress counters and re-accumulates them from
 * every vdev on the pdev under the vdev list lock, using the IPA-aware
 * vdev aggregation. Uses a heap scratch buffer because cdp_vdev_stats
 * is too large for the stack.
 *
 * Fix over previous revision: the local 'soc' variable was assigned from
 * pdev->soc but never used — removed.
 *
 * Return: void
 */
static inline
void dp_ipa_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
		       pdev->soc);
		return;
	}

	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		dp_ipa_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	qdf_mem_free(vdev_stats);
}
/**
* dp_ipa_get_peer_stats - Get peer stats
* @peer: Data path peer
* @peer_stats: buffer to hold peer stats
*
* Return: void
*/
void dp_ipa_get_peer_stats(struct dp_peer *peer,
struct cdp_peer_stats *peer_stats)
{
dp_peer_aggregate_tid_stats(peer);
dp_get_peer_stats(peer, peer_stats);
peer_stats->tx.tx_success.num =
peer_stats->tx.tx_ucast_success.num;
peer_stats->tx.tx_success.bytes =
peer_stats->tx.tx_ucast_success.bytes;
peer_stats->tx.ucast.num =
peer_stats->tx.tx_ucast_total.num;
peer_stats->tx.ucast.bytes =
peer_stats->tx.tx_ucast_total.bytes;
if (peer_stats->rx.rx_total.num >= peer_stats->rx.multicast.num)
peer_stats->rx.unicast.num = peer_stats->rx.rx_total.num -
peer_stats->rx.multicast.num;
if (peer_stats->rx.rx_total.bytes >= peer_stats->rx.multicast.bytes)
peer_stats->rx.unicast.bytes = peer_stats->rx.rx_total.bytes -
peer_stats->rx.multicast.bytes;
}
/**
 * dp_ipa_txrx_get_pdev_stats() - fetch pdev stats
 * @soc: DP soc handle
 * @pdev_id: id of DP pdev handle
 * @pdev_stats: buffer that receives the pdev stats
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when the
 *	   pdev id cannot be resolved
 */
QDF_STATUS
dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
			   struct cdp_pdev_stats *pdev_stats)
{
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	dp_ipa_aggregate_pdev_stats(pdev);
	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_txrx_get_vdev_stats() - fetch vdev stats
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @buf: caller-supplied cdp_vdev_stats buffer that receives the stats
 * @is_aggregate: unused here; stats are always aggregated — kept for
 *		  cdp ops signature compatibility
 *
 * Fix over previous revision: the local 'vdev_stats' variable was assigned
 * from @buf but never used (the aggregate call took @buf directly) —
 * removed, with the cast applied at the call site instead.
 *
 * Return: 0 on success, 1 when the vdev reference cannot be taken
 */
int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       void *buf, bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);

	if (!vdev)
		return 1;

	dp_ipa_aggregate_vdev_stats(vdev, (struct cdp_vdev_stats *)buf);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);

	return 0;
}
/**
 * dp_ipa_txrx_get_peer_stats() - fetch peer stats
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @peer_stats: buffer that receives the peer stats
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when no
 *	   matching peer is found (peer_stats is zeroed either way)
 */
QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
				      uint8_t *peer_mac,
				      struct cdp_peer_stats *peer_stats)
{
	struct cdp_peer_info peer_info = { 0 };
	struct dp_peer *peer;

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
				 CDP_WILD_PEER_TYPE);
	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
					 DP_MOD_ID_IPA);

	/* Callers always get a defined buffer, even on lookup failure */
	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
	if (!peer)
		return QDF_STATUS_E_FAILURE;

	dp_ipa_get_peer_stats(peer, peer_stats);
	dp_peer_unref_delete(peer, DP_MOD_ID_IPA);

	return QDF_STATUS_SUCCESS;
}
#endif
#endif

Voir le fichier

@@ -468,6 +468,17 @@ dp_ipa_ast_notify_cb(qdf_ipa_wdi_conn_in_params_t *pipe_in,
}
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
uint8_t *peer_mac,
struct cdp_peer_stats *peer_stats);
int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
void *buf, bool is_aggregate);
QDF_STATUS dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
struct cdp_pdev_stats *pdev_stats);
QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
uint8_t *peer_mac, qdf_nbuf_t nbuf);
#endif
#else
static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{

Voir le fichier

@@ -10014,7 +10014,7 @@ void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
*
* Return: none
*/
static inline
void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
{
dp_get_peer_calibr_stats(peer, peer_stats);
@@ -14139,6 +14139,11 @@ static struct cdp_host_stats_ops dp_ops_host_stats = {
.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
#if defined(IPA_OFFLOAD) && defined(QCA_ENHANCED_STATS_SUPPORT)
.txrx_get_peer_stats = dp_ipa_txrx_get_peer_stats,
.txrx_get_vdev_stats = dp_ipa_txrx_get_vdev_stats,
.txrx_get_pdev_stats = dp_ipa_txrx_get_pdev_stats,
#endif
.txrx_get_ratekbps = dp_txrx_get_ratekbps,
.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,

Voir le fichier

@@ -8493,6 +8493,7 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
goto link_stats;
dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
dp_peer_aggregate_tid_stats(peer);
per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
@@ -8587,8 +8588,12 @@ void dp_update_pdev_stats(struct dp_pdev *tgtobj,
for (i = 0; i < WME_AC_MAX; i++) {
tgtobj->stats.tx.wme_ac_type[i] +=
srcobj->tx.wme_ac_type[i];
tgtobj->stats.tx.wme_ac_type_bytes[i] +=
srcobj->tx.wme_ac_type_bytes[i];
tgtobj->stats.rx.wme_ac_type[i] +=
srcobj->rx.wme_ac_type[i];
tgtobj->stats.rx.wme_ac_type_bytes[i] +=
srcobj->rx.wme_ac_type_bytes[i];
tgtobj->stats.tx.excess_retries_per_ac[i] +=
srcobj->tx.excess_retries_per_ac[i];
}
@@ -8757,11 +8762,13 @@ void dp_update_pdev_stats(struct dp_pdev *tgtobj,
srcobj->rx.rx_lmac[i].bytes;
}
srcobj->rx.unicast.num =
srcobj->rx.to_stack.num -
if (srcobj->rx.to_stack.num >= (srcobj->rx.multicast.num))
srcobj->rx.unicast.num =
srcobj->rx.to_stack.num -
(srcobj->rx.multicast.num);
srcobj->rx.unicast.bytes =
srcobj->rx.to_stack.bytes -
if (srcobj->rx.to_stack.bytes >= srcobj->rx.multicast.bytes)
srcobj->rx.unicast.bytes =
srcobj->rx.to_stack.bytes -
(srcobj->rx.multicast.bytes);
tgtobj->stats.rx.unicast.num += srcobj->rx.unicast.num;

Voir le fichier

@@ -3904,6 +3904,7 @@ struct dp_peer_per_pkt_tx_stats {
* @rts_failure: RTS failure count
* @bar_cnt: Block ACK Request frame count
* @ndpa_cnt: NDP announcement frame count
* @wme_ac_type_bytes: Wireless Multimedia bytes Count
*/
struct dp_peer_extd_tx_stats {
uint32_t stbc;
@@ -3960,6 +3961,7 @@ struct dp_peer_extd_tx_stats {
uint32_t rts_failure;
uint32_t bar_cnt;
uint32_t ndpa_cnt;
uint64_t wme_ac_type_bytes[WME_AC_MAX];
};
/**
@@ -3991,6 +3993,7 @@ struct dp_peer_extd_tx_stats {
* @policy_check_drop: policy check drops
* @to_stack_twt: Total packets sent up the stack in TWT session
* @protocol_trace_cnt: per-peer protocol counters
* @rx_total: total rx count
*/
struct dp_peer_per_pkt_rx_stats {
struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
@@ -4027,6 +4030,9 @@ struct dp_peer_per_pkt_rx_stats {
struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
#endif
uint32_t mcast_3addr_drop;
#ifdef IPA_OFFLOAD
struct cdp_pkt_info rx_total;
#endif
};
/**
@@ -4077,6 +4083,7 @@ struct dp_peer_per_pkt_rx_stats {
* @punc_bw[MAX_PUNCTURED_MODE]: MSDU count for punctured bw
* @bar_cnt: Block ACK Request frame count
* @ndpa_cnt: NDP announcement frame count
* @wme_ac_type_bytes: Wireless Multimedia type Bytes Count
*/
struct dp_peer_extd_rx_stats {
struct cdp_pkt_type pkt_type[DOT11_MAX];
@@ -4124,6 +4131,7 @@ struct dp_peer_extd_rx_stats {
#endif
uint32_t bar_cnt;
uint32_t ndpa_cnt;
uint64_t wme_ac_type_bytes[WME_AC_MAX];
};
/**

Voir le fichier

@@ -3101,9 +3101,14 @@ dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu);
DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu);
DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu);
if (ppdu->tid < CDP_DATA_TID_MAX)
if (ppdu->tid < CDP_DATA_TID_MAX) {
DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
num_msdu);
DP_STATS_INC(mon_peer,
tx.wme_ac_type_bytes[TID_TO_WME_AC(ppdu->tid)],
tx_byte_count);
}
DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc);
DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc);
if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)

Voir le fichier

@@ -865,6 +865,7 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
enum cdp_mu_packet_type mu_pkt_type;
struct dp_mon_ops *mon_ops;
struct dp_mon_pdev *mon_pdev = NULL;
uint64_t byte_count;
if (qdf_likely(pdev))
soc = pdev->soc;
@@ -910,6 +911,8 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
}
num_msdu = ppdu_user->num_msdu;
byte_count = ppdu_user->mpdu_ok_byte_count +
ppdu_user->mpdu_err_byte_count;
pkt_bw_offset = dp_get_bw_offset_frm_bw(soc, ppdu->u.bw);
DP_STATS_UPD(mon_peer, rx.snr, (ppdu->rssi + pkt_bw_offset));
@@ -1030,8 +1033,11 @@ static void dp_rx_stats_update(struct dp_pdev *pdev,
*/
ac = TID_TO_WME_AC(ppdu_user->tid);
if (qdf_likely(ppdu->tid != HAL_TID_INVALID))
if (qdf_likely(ppdu->tid != HAL_TID_INVALID)) {
DP_STATS_INC(mon_peer, rx.wme_ac_type[ac], num_msdu);
DP_STATS_INC(mon_peer, rx.wme_ac_type_bytes[ac],
byte_count);
}
DP_STATS_INC(mon_peer, rx.rx_ppdus, 1);
DP_STATS_INC(mon_peer, rx.rx_mpdus,