qcacmn: Add missing TxRx Datapath stats

1. Add QDF_TRACE_STATS for datapath stats to remove
   module prints.
2. Fix stats indentation for better readability.
3. Add missing stats:
   a. Update packets on the Rx side on a per-ring basis.
   b. Add stats for features (TSO/SG/RAW/Mesh).
   c. Add packet type counts on Rx.

Change-Id: Ief1719b67330f729ff60703ec2be26bc5e4201e9
CRs-Fixed: 2034876
Ishank Jain
2017-04-12 10:42:22 +05:30
committed by snandini
parent 56bfd8fdb3
commit 57c42a193c
7 changed files with 538 additions and 431 deletions
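
For orientation, items 3a and 3c of the message land on the Rx processing path as a per-ring packet counter and a per-packet-type MCS histogram. A condensed sketch of the pattern, taken from the dp_rx_process() hunk later in this commit (ring_id comes from the HAL SRNG, pkt_type/mcs from the Rx TLVs):

    /* Condensed from the dp_rx_process() changes in this commit. */
    ring_id = hal_srng_ring_id_get(hal_ring);
    DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1,
                     qdf_nbuf_len(rx_desc->nbuf));

    pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
    mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
    DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
                  ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));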

View File

@@ -88,6 +88,7 @@
(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
WME_AC_VO)
#define CDP_MAX_RX_RINGS 4
/*
* htt_dbg_stats_type -
* bit positions for each stats type within a stats type bitmask
@@ -134,6 +135,7 @@ enum cdp_host_txrx_stats {
TXRX_TX_HOST_STATS = 2,
TXRX_RX_HOST_STATS = 3,
TXRX_CLEAR_STATS = 4,
TXRX_HOST_STATS_MAX,
};
/**
@@ -590,10 +592,7 @@ struct cdp_tx_stats {
/* Packets dropped on the Tx side */
struct {
uint32_t dma_map_error;
/* dropped due to ring full */
uint32_t ring_full;
/* Discarded bu firmware */
/* Discarded by firmware */
uint32_t fw_discard;
/* fw_discard_retired */
uint32_t fw_discard_retired;
@@ -615,18 +614,22 @@ struct cdp_rx_stats {
/* Total packets sent up the stack */
struct cdp_pkt_info to_stack;
/* Packets received on the reo ring */
struct cdp_pkt_info rcvd_reo;
/* Total multicast packets */
struct cdp_pkt_info unicast;
struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
/* Total unicast packets */
struct cdp_pkt_info unicast;
/* Total multicast packets */
struct cdp_pkt_info multicast;
/* WDS packets received */
struct cdp_pkt_info wds;
/* Intra BSS packets received */
struct cdp_pkt_info intra_bss;
/* Raw Pakets received */
struct cdp_pkt_info raw;
struct {
/* Intra BSS packets received */
struct cdp_pkt_info pkts;
struct cdp_pkt_info fail;
} intra_bss;
/* Errors */
struct {
/* Rx MIC errors */
@@ -639,8 +642,11 @@ struct cdp_rx_stats {
uint32_t wme_ac_type[WME_AC_MAX];
/* Reception type os packets */
uint32_t reception_type[SUPPORTED_RECEPTION_TYPES];
/* packets in different MCS rates */
uint32_t mcs_count[MAX_MCS + 1];
/* Packet Type */
struct {
/* MCS Count */
uint32_t mcs_count[MAX_MCS + 1];
} pkt_type[DOT11_MAX];
/* SGI count */
uint32_t sgi_count[MAX_MCS + 1];
/* Packet count in spatiel Streams */
@@ -661,16 +667,19 @@ struct cdp_rx_stats {
struct cdp_tx_ingress_stats {
/* Total packets received for transmission */
struct cdp_pkt_info rcvd;
/* Tx packets freed*/
struct cdp_pkt_info freed;
/* Tx packets processed*/
struct cdp_pkt_info processed;
/* Total packets passed Reinject handler */
struct cdp_pkt_info reinject_pkts;
/* Total packets passed to inspect handler */
struct cdp_pkt_info inspect_pkts;
/* Total Raw packets */
struct cdp_pkt_info raw_pkt;
struct {
/* Total Raw packets */
struct cdp_pkt_info raw_pkt;
/* DMA map error */
uint32_t dma_map_error;
} raw;
/* TSO packets info */
struct {
@@ -692,6 +701,8 @@ struct cdp_tx_ingress_stats {
uint32_t dropped_host;
/* SG packets dropped by target */
uint32_t dropped_target;
/* Dma map error */
uint32_t dma_map_error;
} sg;
/* Multicast Enhancement packets info */
@@ -714,7 +725,7 @@ struct cdp_tx_ingress_stats {
/* Packets dropped on the Tx side */
struct {
/* Total packets dropped */
/* Total scatter gather packets */
struct cdp_pkt_info dropped_pkt;
/* Desc Not Available */
uint32_t desc_na;
@@ -724,6 +735,8 @@ struct cdp_tx_ingress_stats {
uint32_t enqueue_fail;
/* DMA failed */
uint32_t dma_error;
/* Resource Full: Congestion Control */
uint32_t res_full;
} dropped;
};
@@ -773,14 +786,33 @@ struct cdp_pdev_stats {
/* packets dropped on rx */
struct {
/* packets dropped because nsdu_done bit not set */
struct cdp_pkt_info msdu_not_done;
uint32_t msdu_not_done;
/* Multicast Echo check */
uint32_t mec;
/* Mesh Filtered packets */
uint32_t mesh_filter;
} dropped;
/* total packets replnished */
struct cdp_pkt_info replenished;
struct {
/* total packets replnished */
struct cdp_pkt_info pkts;
/* rxdma errors */
uint32_t rxdma_err;
/* nbuf alloc failed */
uint32_t nbuf_alloc_fail;
/* Mapping failure */
uint32_t map_err;
/* x86 failures */
uint32_t x86_fail;
} replenish;
/* Rx Raw Packets */
uint32_t rx_raw_pkts;
/* Mesh Rx Stats Alloc fail */
uint32_t mesh_mem_alloc;
/* Rx errors */
struct {
/* rxdma_unitialized errors */
uint32_t rxdma_unitialized;
/* desc alloc failed errors */
uint32_t desc_alloc_fail;
} err;
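
The new replenish/dropped/err fields above are populated from the Rx buffer-refill and Rx processing paths; the dp_rx.c hunks later in this commit reduce to roughly the following (condensed, error paths only):

    /* Condensed from dp_rx_buffers_replenish() in this commit. */
    if (rx_netbuf == NULL) {
            DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
            continue;
    }
    ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
                              QDF_DMA_BIDIRECTIONAL);
    if (ret == QDF_STATUS_E_FAILURE) {
            DP_STATS_INC(dp_pdev, replenish.map_err, 1);
            continue;
    }
    /* ... on success, account all requested buffers in one shot ... */
    DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
                     (RX_BUFFER_SIZE * num_req_buffers));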

View File

@@ -57,6 +57,21 @@ while (0)
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
fmt, ## args)
#ifdef CONFIG_WIN
#ifdef DP_STATS_LOG_EN
#define DP_TRACE_STATS(LVL, fmt, args ...) \
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
fmt, ## args)
#else
#define DP_TRACE_STATS(LVL, fmt, args ...) \
qdf_trace(QDF_TRACE_LEVEL_##LVL, fmt, ## args)
#endif
#else
#define DP_TRACE_STATS(LVL, fmt, args ...) \
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
fmt, ## args)
#endif
#define DP_STATS_INIT(_handle) \
qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0)
@@ -204,7 +219,7 @@ while (0)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#define DP_HIST_INIT()
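
Two points about the dp_internal.h change above: DP_TRACE_STATS is the wrapper the new stats dumps use; on CONFIG_WIN builds it routes through QDF_TRACE only when DP_STATS_LOG_EN is defined (plain qdf_trace otherwise), while non-WIN builds always use QDF_TRACE. The disabled-stats stub for DP_STATS_INCC_PKT also gains the previously missing _cond parameter so call sites compile either way. Usage is simply:

    /* Sketch: LVL expands to a QDF_TRACE_LEVEL_* suffix, e.g. NONE. */
    DP_TRACE_STATS(NONE, "Packets = %d", pdev->stats.tx_i.rcvd.num);
    DP_TRACE_STATS(NONE, "Bytes = %d\n", pdev->stats.tx_i.rcvd.bytes);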

View File

@@ -2420,7 +2420,11 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
DP_STATS_AGGR(vdev, peer, tx.pkt_type[2].mcs_count[i]);
DP_STATS_AGGR(vdev, peer, tx.pkt_type[3].mcs_count[i]);
DP_STATS_AGGR(vdev, peer, tx.pkt_type[4].mcs_count[i]);
DP_STATS_AGGR(vdev, peer, rx.mcs_count[i]);
DP_STATS_AGGR(vdev, peer, rx.pkt_type[0].mcs_count[i]);
DP_STATS_AGGR(vdev, peer, rx.pkt_type[1].mcs_count[i]);
DP_STATS_AGGR(vdev, peer, rx.pkt_type[2].mcs_count[i]);
DP_STATS_AGGR(vdev, peer, rx.pkt_type[3].mcs_count[i]);
DP_STATS_AGGR(vdev, peer, rx.pkt_type[4].mcs_count[i]);
}
for (i = 0; i < SUPPORTED_BW; i++) {
@@ -2454,14 +2458,13 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
DP_STATS_AGGR(vdev, peer, tx.retries);
DP_STATS_AGGR(vdev, peer, tx.non_amsdu_cnt);
DP_STATS_AGGR(vdev, peer, tx.amsdu_cnt);
DP_STATS_AGGR(vdev, peer, tx.dropped.dma_map_error);
DP_STATS_AGGR(vdev, peer, tx.dropped.ring_full);
DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard);
DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_retired);
DP_STATS_AGGR(vdev, peer, tx.dropped.mpdu_age_out);
DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason1);
DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason2);
DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason3);
DP_STATS_AGGR(vdev, peer, rx.err.mic_err);
DP_STATS_AGGR(vdev, peer, rx.err.decrypt_err);
DP_STATS_AGGR(vdev, peer, rx.non_ampdu_cnt);
@@ -2469,12 +2472,20 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
DP_STATS_AGGR(vdev, peer, rx.non_amsdu_cnt);
DP_STATS_AGGR(vdev, peer, rx.amsdu_cnt);
DP_STATS_AGGR_PKT(vdev, peer, rx.to_stack);
DP_STATS_AGGR_PKT(vdev, peer, rx.rcvd_reo);
for (i = 0; i < CDP_MAX_RX_RINGS; i++)
DP_STATS_AGGR_PKT(vdev, peer, rx.rcvd_reo[i]);
peer->stats.rx.unicast.num = peer->stats.rx.to_stack.num -
peer->stats.rx.multicast.num;
peer->stats.rx.unicast.bytes = peer->stats.rx.to_stack.bytes -
peer->stats.rx.multicast.bytes;
DP_STATS_AGGR_PKT(vdev, peer, rx.unicast);
DP_STATS_AGGR_PKT(vdev, peer, rx.multicast);
DP_STATS_AGGR_PKT(vdev, peer, rx.wds);
DP_STATS_AGGR_PKT(vdev, peer, rx.raw);
DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss);
DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.pkts);
DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.fail);
vdev->stats.tx.last_ack_rssi =
peer->stats.tx.last_ack_rssi;
@@ -2507,7 +2518,11 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
DP_STATS_AGGR(pdev, vdev, tx.pkt_type[2].mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, tx.pkt_type[3].mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, tx.pkt_type[4].mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, rx.mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, rx.pkt_type[0].mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, rx.pkt_type[1].mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, rx.pkt_type[2].mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, rx.pkt_type[3].mcs_count[i]);
DP_STATS_AGGR(pdev, vdev, rx.pkt_type[4].mcs_count[i]);
}
for (i = 0; i < SUPPORTED_BW; i++) {
@@ -2542,8 +2557,6 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
DP_STATS_AGGR(pdev, vdev, tx.retries);
DP_STATS_AGGR(pdev, vdev, tx.non_amsdu_cnt);
DP_STATS_AGGR(pdev, vdev, tx.amsdu_cnt);
DP_STATS_AGGR(pdev, vdev, tx.dropped.dma_map_error);
DP_STATS_AGGR(pdev, vdev, tx.dropped.ring_full);
DP_STATS_AGGR(pdev, vdev, tx.dropped.fw_discard);
DP_STATS_AGGR(pdev, vdev,
tx.dropped.fw_discard_retired);
@@ -2554,6 +2567,7 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
tx.dropped.fw_discard_reason2);
DP_STATS_AGGR(pdev, vdev,
tx.dropped.fw_discard_reason3);
DP_STATS_AGGR(pdev, vdev, rx.err.mic_err);
DP_STATS_AGGR(pdev, vdev, rx.err.decrypt_err);
DP_STATS_AGGR(pdev, vdev, rx.non_ampdu_cnt);
@@ -2561,19 +2575,23 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
DP_STATS_AGGR(pdev, vdev, rx.non_amsdu_cnt);
DP_STATS_AGGR(pdev, vdev, rx.amsdu_cnt);
DP_STATS_AGGR_PKT(pdev, vdev, rx.to_stack);
DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo);
DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[0]);
DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[1]);
DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[2]);
DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[3]);
DP_STATS_AGGR_PKT(pdev, vdev, rx.unicast);
DP_STATS_AGGR_PKT(pdev, vdev, rx.multicast);
DP_STATS_AGGR_PKT(pdev, vdev, rx.wds);
DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss);
DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.pkts);
DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.fail);
DP_STATS_AGGR_PKT(pdev, vdev, rx.raw);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.freed);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw_pkt);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
@@ -2588,7 +2606,18 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
DP_STATS_AGGR(pdev, vdev,
tx_i.mcast_en.dropped_send_fail);
DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
DP_STATS_AGGR_PKT(pdev, vdev, tx_i.dropped.dropped_pkt);
DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
pdev->stats.tx_i.dropped.dropped_pkt.num =
pdev->stats.tx_i.dropped.dma_error +
pdev->stats.tx_i.dropped.ring_full +
pdev->stats.tx_i.dropped.enqueue_fail +
pdev->stats.tx_i.dropped.desc_na +
pdev->stats.tx_i.dropped.res_full;
pdev->stats.tx.last_ack_rssi =
vdev->stats.tx.last_ack_rssi;
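
Written out without the DP_STATS_AGGR/DP_STATS_AGGR_PKT macros (whose definitions are not part of this diff; a simple add of num/bytes is assumed here), the aggregation changes above amount to:

    /* Sketch only: assumes DP_STATS_AGGR_PKT() adds num and bytes. */
    for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
            vdev->stats.rx.rcvd_reo[i].num   += peer->stats.rx.rcvd_reo[i].num;
            vdev->stats.rx.rcvd_reo[i].bytes += peer->stats.rx.rcvd_reo[i].bytes;
    }
    /* Unicast is now derived rather than counted directly. */
    peer->stats.rx.unicast.num = peer->stats.rx.to_stack.num -
                                 peer->stats.rx.multicast.num;
    /* Ingress drops are summed from the per-reason counters. */
    pdev->stats.tx_i.dropped.dropped_pkt.num =
            pdev->stats.tx_i.dropped.dma_error +
            pdev->stats.tx_i.dropped.ring_full +
            pdev->stats.tx_i.dropped.enqueue_fail +
            pdev->stats.tx_i.dropped.desc_na +
            pdev->stats.tx_i.dropped.res_full;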
@@ -2606,76 +2635,97 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
static inline void
dp_print_pdev_tx_stats(struct dp_pdev *pdev)
{
DP_TRACE(NONE, "WLAN Tx Stats:\n");
DP_TRACE(NONE, "Received From Stack:\n");
DP_TRACE(NONE, "Total Packets Received = %d",
DP_TRACE_STATS(NONE, "WLAN Tx Stats:\n");
DP_TRACE_STATS(NONE, "Received From Stack:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.rcvd.num);
DP_TRACE(NONE, "Bytes Sent = %d",
DP_TRACE_STATS(NONE, "Bytes = %d\n",
pdev->stats.tx_i.rcvd.bytes);
DP_TRACE(NONE, "Processed:\n");
DP_TRACE(NONE, "Msdu Processed = %d",
DP_TRACE_STATS(NONE, "Processed:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.processed.num);
DP_TRACE(NONE, "Bytes Processed = %d",
DP_TRACE_STATS(NONE, "Bytes = %d\n",
pdev->stats.tx_i.processed.bytes);
DP_TRACE(NONE, "Completions:\n");
DP_TRACE(NONE, "Msdu Sent = %d",
DP_TRACE_STATS(NONE, "Completions:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx.comp_pkt.num);
DP_TRACE(NONE, "Bytes Sent = %d",
DP_TRACE_STATS(NONE, "Bytes = %d\n",
pdev->stats.tx.comp_pkt.bytes);
DP_TRACE(NONE, "Freed:\n");
DP_TRACE(NONE, "Msdus Freed = %d",
pdev->stats.tx_i.freed.num);
DP_TRACE(NONE, "Bytes Freed = %d",
pdev->stats.tx_i.freed.bytes);
DP_TRACE(NONE, "Dropped:\n");
DP_TRACE(NONE, "Total Packets Dropped = %d",
DP_TRACE_STATS(NONE, "Dropped:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.dropped.dropped_pkt.num);
DP_TRACE(NONE, "Bytes Dropped = %d",
pdev->stats.tx_i.dropped.dropped_pkt.bytes);
DP_TRACE(NONE, "Dma_map_error = %d",
pdev->stats.tx.dropped.dma_map_error);
DP_TRACE(NONE, "Ring Full = %d", pdev->stats.tx.dropped.ring_full);
DP_TRACE(NONE, "Fw Discard = %d",
DP_TRACE_STATS(NONE, "Dma_map_error = %d",
pdev->stats.tx_i.dropped.dma_error);
DP_TRACE_STATS(NONE, "Ring Full = %d",
pdev->stats.tx_i.dropped.ring_full);
DP_TRACE_STATS(NONE, "Descriptor Not available = %d",
pdev->stats.tx_i.dropped.desc_na);
DP_TRACE_STATS(NONE, "HW enqueue failed= %d",
pdev->stats.tx_i.dropped.enqueue_fail);
DP_TRACE_STATS(NONE, "Resources Full = %d",
pdev->stats.tx_i.dropped.res_full);
DP_TRACE_STATS(NONE, "Fw Discard = %d",
pdev->stats.tx.dropped.fw_discard);
DP_TRACE(NONE, "Fw Discard Retired = %d",
DP_TRACE_STATS(NONE, "Fw Discard Retired = %d",
pdev->stats.tx.dropped.fw_discard_retired);
DP_TRACE(NONE, "Firmware Discard Untransmitted = %d",
DP_TRACE_STATS(NONE, "Firmware Discard Untransmitted = %d",
pdev->stats.tx.dropped.fw_discard_untransmitted);
DP_TRACE(NONE, "Mpdu Age Out = %d",
DP_TRACE_STATS(NONE, "Mpdu Age Out = %d",
pdev->stats.tx.dropped.mpdu_age_out);
DP_TRACE(NONE, "Firmware Discard Reason1 = %d",
DP_TRACE_STATS(NONE, "Firmware Discard Reason1 = %d",
pdev->stats.tx.dropped.fw_discard_reason1);
DP_TRACE(NONE, "Firmware Discard Reason2 = %d",
DP_TRACE_STATS(NONE, "Firmware Discard Reason2 = %d",
pdev->stats.tx.dropped.fw_discard_reason2);
DP_TRACE(NONE, "Firmware Discard Reason3 = %d",
DP_TRACE_STATS(NONE, "Firmware Discard Reason3 = %d\n",
pdev->stats.tx.dropped.fw_discard_reason3);
DP_TRACE(NONE, "Scatter Gather:\n");
DP_TRACE(NONE, "Total Packets = %d",
DP_TRACE_STATS(NONE, "Scatter Gather:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.sg.sg_pkt.num);
DP_TRACE(NONE, "Total Bytes = %d",
DP_TRACE_STATS(NONE, "Bytes = %d",
pdev->stats.tx_i.sg.sg_pkt.bytes);
DP_TRACE(NONE, "Dropped By Host = %d",
DP_TRACE_STATS(NONE, "Dropped By Host = %d",
pdev->stats.tx_i.sg.dropped_host);
DP_TRACE(NONE, "Dropped By Target = %d",
DP_TRACE_STATS(NONE, "Dropped By Target = %d\n",
pdev->stats.tx_i.sg.dropped_target);
DP_TRACE(NONE, "Tso:\n");
DP_TRACE(NONE, "Number of Segments = %d",
DP_TRACE_STATS(NONE, "Tso:\n");
DP_TRACE_STATS(NONE, "Number of Segments = %d",
pdev->stats.tx_i.tso.num_seg);
DP_TRACE(NONE, "Number Packets = %d",
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.tso.tso_pkt.num);
DP_TRACE(NONE, "Total Bytes = %d",
DP_TRACE_STATS(NONE, "Bytes = %d",
pdev->stats.tx_i.tso.tso_pkt.bytes);
DP_TRACE(NONE, "Dropped By Host = %d",
DP_TRACE_STATS(NONE, "Dropped By Host = %d\n",
pdev->stats.tx_i.tso.dropped_host);
DP_TRACE(NONE, "Mcast Enhancement:\n");
DP_TRACE(NONE, "Dropped: Map Errors = %d",
DP_TRACE_STATS(NONE, "Mcast Enhancement:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.mcast_en.mcast_pkt.num);
DP_TRACE_STATS(NONE, "Bytes = %d",
pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
DP_TRACE_STATS(NONE, "Dropped: Map Errors = %d",
pdev->stats.tx_i.mcast_en.dropped_map_error);
DP_TRACE(NONE, "Dropped: Self Mac = %d",
DP_TRACE_STATS(NONE, "Dropped: Self Mac = %d",
pdev->stats.tx_i.mcast_en.dropped_self_mac);
DP_TRACE(NONE, "Dropped: Send Fail = %d",
DP_TRACE_STATS(NONE, "Dropped: Send Fail = %d",
pdev->stats.tx_i.mcast_en.dropped_send_fail);
DP_TRACE(NONE, "Total Unicast sent = %d",
DP_TRACE_STATS(NONE, "Unicast sent = %d\n",
pdev->stats.tx_i.mcast_en.ucast);
DP_TRACE_STATS(NONE, "Raw:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.raw.raw_pkt.num);
DP_TRACE_STATS(NONE, "Bytes = %d",
pdev->stats.tx_i.raw.raw_pkt.bytes);
DP_TRACE_STATS(NONE, "DMA map error = %d\n",
pdev->stats.tx_i.raw.dma_map_error);
DP_TRACE_STATS(NONE, "Reinjected:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.reinject_pkts.num);
DP_TRACE_STATS(NONE, "Bytes = %d\n",
pdev->stats.tx_i.reinject_pkts.bytes);
DP_TRACE_STATS(NONE, "Inspected:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.tx_i.inspect_pkts.num);
DP_TRACE_STATS(NONE, "Bytes = %d\n",
pdev->stats.tx_i.inspect_pkts.bytes);
}
/**
@@ -2687,33 +2737,42 @@ dp_print_pdev_tx_stats(struct dp_pdev *pdev)
static inline void
dp_print_pdev_rx_stats(struct dp_pdev *pdev)
{
DP_TRACE(NONE, "WLAN Rx Stats:\n");
DP_TRACE(NONE, "Received From HW (Reo Dest Ring):\n");
DP_TRACE(NONE, "Total Packets Received = %d",
pdev->stats.rx.rcvd_reo.num);
DP_TRACE(NONE, "Bytes Sent = %d",
pdev->stats.rx.rcvd_reo.bytes);
DP_TRACE(NONE, "Replenished:\n");
DP_TRACE(NONE, "Total Packets Replenished = %d",
pdev->stats.replenished.num);
DP_TRACE(NONE, "Bytes Sent = %d",
pdev->stats.replenished.bytes);
DP_TRACE(NONE, "Buffers Added To Freelist = %d",
DP_TRACE_STATS(NONE, "WLAN Rx Stats:\n");
DP_TRACE_STATS(NONE, "Received From HW (Per Rx Ring):\n");
DP_TRACE_STATS(NONE, "Packets = %d %d %d %d",
pdev->stats.rx.rcvd_reo[0].num,
pdev->stats.rx.rcvd_reo[1].num,
pdev->stats.rx.rcvd_reo[2].num,
pdev->stats.rx.rcvd_reo[3].num);
DP_TRACE_STATS(NONE, "Bytes = %d %d %d %d\n",
pdev->stats.rx.rcvd_reo[0].bytes,
pdev->stats.rx.rcvd_reo[1].bytes,
pdev->stats.rx.rcvd_reo[2].bytes,
pdev->stats.rx.rcvd_reo[3].bytes);
DP_TRACE_STATS(NONE, "Replenished:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.replenish.pkts.num);
DP_TRACE_STATS(NONE, "Bytes = %d",
pdev->stats.replenish.pkts.bytes);
DP_TRACE_STATS(NONE, "Buffers Added To Freelist = %d\n",
pdev->stats.buf_freelist);
DP_TRACE(NONE, "Dropped:\n");
DP_TRACE(NONE, "Total Packets With Msdu Not Done = %d",
pdev->stats.dropped.msdu_not_done.num);
DP_TRACE(NONE, "Bytes Sent With Msdu Not Done = %d",
pdev->stats.dropped.msdu_not_done.bytes);
DP_TRACE(NONE, "Sent To Stack:\n");
DP_TRACE(NONE, "Packets Sent To Stack = %d",
DP_TRACE_STATS(NONE, "Dropped:\n");
DP_TRACE_STATS(NONE, "Total Packets With Msdu Not Done = %d\n",
pdev->stats.dropped.msdu_not_done);
DP_TRACE_STATS(NONE, "Sent To Stack:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.rx.to_stack.num);
DP_TRACE(NONE, "Bytes Sent To Stack = %d",
DP_TRACE_STATS(NONE, "Bytes = %d\n",
pdev->stats.rx.to_stack.bytes);
DP_TRACE(NONE, "Errors:\n");
DP_TRACE(NONE, "Rxdma Ring Unititalized: %d",
pdev->stats.err.rxdma_unitialized);
DP_TRACE(NONE, "Desc Alloc Failed: %d",
DP_TRACE_STATS(NONE, "Multicast/Broadcast:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
pdev->stats.rx.multicast.num);
DP_TRACE_STATS(NONE, "Bytes = %d\n",
pdev->stats.rx.multicast.bytes);
DP_TRACE_STATS(NONE, "Errors:\n");
DP_TRACE_STATS(NONE, "Rxdma Ring Un-inititalized = %d",
pdev->stats.replenish.rxdma_err);
DP_TRACE_STATS(NONE, "Desc Alloc Failed: = %d",
pdev->stats.err.desc_alloc_fail);
}
@@ -2726,13 +2785,19 @@ dp_print_pdev_rx_stats(struct dp_pdev *pdev)
static inline void
dp_print_soc_tx_stats(struct dp_soc *soc)
{
DP_TRACE(NONE, "SOC Tx Stats:\n");
DP_TRACE(NONE, "Tx Descriptors In Use = %d",
DP_TRACE_STATS(NONE, "SOC Tx Stats:\n");
DP_TRACE_STATS(NONE, "Tx Descriptors In Use = %d",
soc->stats.tx.desc_in_use);
DP_TRACE(NONE, "Total Packets With No Peer = %d",
DP_TRACE_STATS(NONE, "Invalid peer:\n");
DP_TRACE_STATS(NONE, "Packets = %d",
soc->stats.tx.tx_invalid_peer.num);
DP_TRACE(NONE, "Bytes Sent With No Peer = %d",
DP_TRACE_STATS(NONE, "Bytes = %d",
soc->stats.tx.tx_invalid_peer.bytes);
DP_TRACE_STATS(NONE, "Packets dropped due to TCL ring full = %d %d %d",
soc->stats.tx.tcl_ring_full[0],
soc->stats.tx.tcl_ring_full[1],
soc->stats.tx.tcl_ring_full[2]);
}
@@ -2750,33 +2815,33 @@ dp_print_soc_rx_stats(struct dp_soc *soc)
char rxdma_error[DP_RXDMA_ERR_LENGTH];
uint8_t index = 0;
DP_TRACE(NONE, "SOC Rx Stats:\n");
DP_TRACE(NONE, "Errors:\n");
DP_TRACE(NONE, "Invalid RBM = %d",
DP_TRACE_STATS(NONE, "SOC Rx Stats:\n");
DP_TRACE_STATS(NONE, "Errors:\n");
DP_TRACE_STATS(NONE, "Invalid RBM = %d",
soc->stats.rx.err.invalid_rbm);
DP_TRACE(NONE, "Invalid Vdev = %d",
DP_TRACE_STATS(NONE, "Invalid Vdev = %d",
soc->stats.rx.err.invalid_vdev);
DP_TRACE(NONE, "Invalid Pdev = %d",
DP_TRACE_STATS(NONE, "Invalid Pdev = %d",
soc->stats.rx.err.invalid_pdev);
DP_TRACE(NONE, "Invalid Peer = %d",
DP_TRACE_STATS(NONE, "Invalid Peer = %d",
soc->stats.rx.err.rx_invalid_peer.num);
DP_TRACE(NONE, "HAL Ring Access Fail = %d",
DP_TRACE_STATS(NONE, "HAL Ring Access Fail = %d",
soc->stats.rx.err.hal_ring_access_fail);
for (i = 0; i < MAX_RXDMA_ERRORS; i++) {
index += qdf_snprint(&rxdma_error[index],
DP_RXDMA_ERR_LENGTH - index,
" %d,", soc->stats.rx.err.rxdma_error[i]);
" %d", soc->stats.rx.err.rxdma_error[i]);
}
DP_TRACE(NONE, "RXDMA Error (0-31):%s",
DP_TRACE_STATS(NONE, "RXDMA Error (0-31):%s",
rxdma_error);
index = 0;
for (i = 0; i < REO_ERROR_TYPE_MAX; i++) {
index += qdf_snprint(&reo_error[index],
DP_REO_ERR_LENGTH - index,
" %d,", soc->stats.rx.err.reo_error[i]);
" %d", soc->stats.rx.err.reo_error[i]);
}
DP_TRACE(NONE, "REO Error(0-14):%s",
DP_TRACE_STATS(NONE, "REO Error(0-14):%s",
reo_error);
}
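
The error-array prints above use the usual accumulate-into-a-buffer pattern, now without the trailing comma in each entry; generalized:

    /* Sketch: flatten a counter array into one DP_TRACE_STATS line. */
    char buf[DP_RXDMA_ERR_LENGTH];
    uint8_t index = 0;
    int i;

    for (i = 0; i < MAX_RXDMA_ERRORS; i++)
            index += qdf_snprint(&buf[index], DP_RXDMA_ERR_LENGTH - index,
                                 " %d", soc->stats.rx.err.rxdma_error[i]);
    DP_TRACE_STATS(NONE, "RXDMA Error (0-31):%s", buf);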
@@ -2811,28 +2876,53 @@ static inline void
dp_print_rx_rates(struct dp_vdev *vdev)
{
struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
uint8_t i;
uint8_t i, pkt_type;
uint8_t index = 0;
char mcs[DP_MCS_LENGTH];
char rx_mcs[DOT11_MAX][DP_MCS_LENGTH];
char nss[DP_NSS_LENGTH];
DP_TRACE(NONE, "Rx Rate Info:\n");
for (i = 0; i < MAX_MCS; i++) {
index += qdf_snprint(&mcs[index], DP_MCS_LENGTH - index,
" %d,", pdev->stats.rx.mcs_count[i]);
}
DP_TRACE(NONE, "MCS(0-11):%s",
mcs);
DP_TRACE_STATS(NONE, "Rx Rate Info:\n");
for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
index = 0;
for (i = 0; i < MAX_MCS; i++) {
index += qdf_snprint(&rx_mcs[pkt_type][index],
DP_MCS_LENGTH - index,
" %d ",
pdev->stats.rx.pkt_type[pkt_type].
mcs_count[i]);
}
}
DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
rx_mcs[0]);
DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
pdev->stats.rx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
rx_mcs[1]);
DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
pdev->stats.rx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
rx_mcs[2]);
DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
pdev->stats.rx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "Type 11AC MCS(0-9) = %s",
rx_mcs[3]);
DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
pdev->stats.rx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
rx_mcs[4]);
DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
pdev->stats.rx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);
index = 0;
for (i = 0; i < SS_COUNT; i++) {
index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
" %d,", pdev->stats.rx.nss[i]);
" %d", pdev->stats.rx.nss[i]);
}
DP_TRACE(NONE, "NSS(0-7):%s",
DP_TRACE_STATS(NONE, "NSS(0-7) = %s",
nss);
DP_TRACE(NONE, "SGI:"
DP_TRACE_STATS(NONE, "SGI ="
" 0.8us %d,"
" 0.4us %d,"
" 1.6us %d,"
@@ -2841,26 +2931,26 @@ dp_print_rx_rates(struct dp_vdev *vdev)
pdev->stats.rx.sgi_count[1],
pdev->stats.rx.sgi_count[2],
pdev->stats.rx.sgi_count[3]);
DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
DP_TRACE(NONE, "Reception Type:"
DP_TRACE_STATS(NONE, "Reception Type ="
" SU: %d,"
" MU_MIMO:%d,"
" MU_OFDMA:%d,"
" MU_OFDMA_MIMO:%d",
" MU_OFDMA_MIMO:%d\n",
pdev->stats.rx.reception_type[0],
pdev->stats.rx.reception_type[1],
pdev->stats.rx.reception_type[2],
pdev->stats.rx.reception_type[3]);
DP_TRACE(NONE, "Aggregation:\n");
DP_TRACE(NONE, "Number of Msdu's Part of Ampdus = %d",
DP_TRACE_STATS(NONE, "Aggregation:\n");
DP_TRACE_STATS(NONE, "Number of Msdu's Part of Ampdus = %d",
pdev->stats.rx.ampdu_cnt);
DP_TRACE(NONE, "Number of Msdu's With No Mpdu Level Aggregation : %d",
DP_TRACE_STATS(NONE, "Number of Msdu's With No Mpdu Level Aggregation : %d",
pdev->stats.rx.non_ampdu_cnt);
DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu: %d",
pdev->stats.rx.amsdu_cnt);
DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
pdev->stats.rx.non_amsdu_cnt);
}
@@ -2878,7 +2968,7 @@ dp_print_tx_rates(struct dp_vdev *vdev)
char mcs[DOT11_MAX][DP_MCS_LENGTH];
uint32_t index;
DP_TRACE(NONE, "Tx Rate Info:\n");
DP_TRACE_STATS(NONE, "Tx Rate Info:\n");
for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
index = 0;
@@ -2891,42 +2981,47 @@ dp_print_tx_rates(struct dp_vdev *vdev)
}
}
DP_TRACE(NONE, "Packet Type 11A MCS(0-7):%s",
DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
mcs[0]);
DP_TRACE(NONE, "Packet Type 11A MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
pdev->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11B MCS(0-6):%s",
DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
mcs[1]);
DP_TRACE(NONE, "Packet Type 11B MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
pdev->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11N MCS(0-7):%s",
DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
mcs[2]);
DP_TRACE(NONE, "Packet Type 11N MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
pdev->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11AC MCS(0-9):%s",
DP_TRACE_STATS(NONE, "Type 11AC MCS(0-9) = %s",
mcs[3]);
DP_TRACE(NONE, "Packet Type 11AC MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
pdev->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11AX MCS(0-11):%s",
DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
mcs[4]);
DP_TRACE(NONE, "Packet Type 11AX MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
pdev->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "SGI:"
" 0.8us %d,"
" 0.4us %d,"
" 1.6us %d,"
" 3.2us %d,",
DP_TRACE_STATS(NONE, "SGI ="
" 0.8us %d"
" 0.4us %d"
" 1.6us %d"
" 3.2us %d",
pdev->stats.tx.sgi_count[0],
pdev->stats.tx.sgi_count[1],
pdev->stats.tx.sgi_count[2],
pdev->stats.tx.sgi_count[3]);
DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
DP_TRACE(NONE, "Aggregation:\n");
DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
DP_TRACE_STATS(NONE, "OFDMA = %d", pdev->stats.tx.ofdma);
DP_TRACE_STATS(NONE, "STBC = %d", pdev->stats.tx.stbc);
DP_TRACE_STATS(NONE, "LDPC = %d", pdev->stats.tx.ldpc);
DP_TRACE_STATS(NONE, "Retries = %d", pdev->stats.tx.retries);
DP_TRACE_STATS(NONE, "Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
DP_TRACE_STATS(NONE, "Aggregation:\n");
DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu = %d",
pdev->stats.tx.amsdu_cnt);
DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation = %d",
pdev->stats.tx.non_amsdu_cnt);
}
@@ -2939,58 +3034,54 @@ dp_print_tx_rates(struct dp_vdev *vdev)
static inline void dp_print_peer_stats(struct dp_peer *peer)
{
uint8_t i, pkt_type;
char mcs[DOT11_MAX][DP_MCS_LENGTH];
char tx_mcs[DOT11_MAX][DP_MCS_LENGTH];
char rx_mcs[DOT11_MAX][DP_MCS_LENGTH];
uint32_t index;
char nss[DP_NSS_LENGTH];
char mcs_rx[DP_MCS_LENGTH];
DP_TRACE(NONE, "Node Tx Stats:\n");
DP_TRACE(NONE, "Total Packet Completions %d",
DP_TRACE_STATS(NONE, "Node Tx Stats:\n");
DP_TRACE_STATS(NONE, "Total Packet Completions = %d",
peer->stats.tx.comp_pkt.num);
DP_TRACE(NONE, "Total Bytes Completions %d",
DP_TRACE_STATS(NONE, "Total Bytes Completions = %d",
peer->stats.tx.comp_pkt.bytes);
DP_TRACE(NONE, "Success Packets %d",
DP_TRACE_STATS(NONE, "Success Packets = %d",
peer->stats.tx.tx_success.num);
DP_TRACE(NONE, "Success Bytes %d",
DP_TRACE_STATS(NONE, "Success Bytes = %d",
peer->stats.tx.tx_success.bytes);
DP_TRACE(NONE, "Packets Failed %d",
DP_TRACE_STATS(NONE, "Packets Failed = %d",
peer->stats.tx.tx_failed);
DP_TRACE(NONE, "Packets In OFDMA %d",
DP_TRACE_STATS(NONE, "Packets In OFDMA = %d",
peer->stats.tx.ofdma);
DP_TRACE(NONE, "Packets In STBC %d",
DP_TRACE_STATS(NONE, "Packets In STBC = %d",
peer->stats.tx.stbc);
DP_TRACE(NONE, "Packets In LDPC %d",
DP_TRACE_STATS(NONE, "Packets In LDPC = %d",
peer->stats.tx.ldpc);
DP_TRACE(NONE, "Packet Retries %d",
DP_TRACE_STATS(NONE, "Packet Retries = %d",
peer->stats.tx.retries);
DP_TRACE(NONE, "Msdu's Not Part of Ampdu %d",
DP_TRACE_STATS(NONE, "Msdu's Not Part of Ampdu = %d",
peer->stats.tx.non_amsdu_cnt);
DP_TRACE(NONE, "Mpdu's Part of Ampdu %d",
DP_TRACE_STATS(NONE, "Mpdu's Part of Ampdu = %d",
peer->stats.tx.amsdu_cnt);
DP_TRACE(NONE, "Last Packet RSSI %d",
DP_TRACE_STATS(NONE, "Last Packet RSSI = %d",
peer->stats.tx.last_ack_rssi);
DP_TRACE(NONE, "Dropped At Host: Due To DMA Map Error %d",
peer->stats.tx.dropped.dma_map_error);
DP_TRACE(NONE, "Dropped At Host: Due To Ring Full %d",
peer->stats.tx.dropped.ring_full);
DP_TRACE(NONE, "Dropped At FW: FW Discard %d",
DP_TRACE_STATS(NONE, "Dropped At FW: FW Discard = %d",
peer->stats.tx.dropped.fw_discard);
DP_TRACE(NONE, "Dropped At FW: FW Discard Retired %d",
DP_TRACE_STATS(NONE, "Dropped At FW: FW Discard Retired = %d",
peer->stats.tx.dropped.fw_discard_retired);
DP_TRACE(NONE, "Dropped At FW: FW Discard Untransmitted %d",
DP_TRACE_STATS(NONE, "Dropped At FW: FW Discard Untransmitted = %d",
peer->stats.tx.dropped.fw_discard_untransmitted);
DP_TRACE(NONE, "Dropped : Mpdu Age Out %d",
DP_TRACE_STATS(NONE, "Dropped : Mpdu Age Out = %d",
peer->stats.tx.dropped.mpdu_age_out);
DP_TRACE(NONE, "Dropped : FW Discard Reason1 %d",
DP_TRACE_STATS(NONE, "Dropped : FW Discard Reason1 = %d",
peer->stats.tx.dropped.fw_discard_reason1);
DP_TRACE(NONE, "Dropped : FW Discard Reason2 %d",
DP_TRACE_STATS(NONE, "Dropped : FW Discard Reason2 = %d",
peer->stats.tx.dropped.fw_discard_reason2);
DP_TRACE(NONE, "Dropped : FW Discard Reason3 %d",
DP_TRACE_STATS(NONE, "Dropped : FW Discard Reason3 = %d",
peer->stats.tx.dropped.fw_discard_reason3);
for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
index = 0;
for (i = 0; i < MAX_MCS; i++) {
index += qdf_snprint(&mcs[pkt_type][index],
index += qdf_snprint(&tx_mcs[pkt_type][index],
DP_MCS_LENGTH - index,
" %d ",
peer->stats.tx.pkt_type[pkt_type].
@@ -2998,95 +3089,96 @@ static inline void dp_print_peer_stats(struct dp_peer *peer)
}
}
DP_TRACE(NONE, "Packet Type 11A MCS(0-7):%s",
mcs[0]);
DP_TRACE(NONE, "Packet Type 11A MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
tx_mcs[0]);
DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
peer->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11B MCS(0-6):%s",
mcs[1]);
DP_TRACE(NONE, "Packet Type 11B MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
tx_mcs[1]);
DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
peer->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11N MCS(0-7):%s",
mcs[2]);
DP_TRACE(NONE, "Packet Type 11N MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
tx_mcs[2]);
DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
peer->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11AC MCS(0-9):%s",
mcs[3]);
DP_TRACE(NONE, "Packet Type 11AC MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11AC MCS(0-9) = %s",
tx_mcs[3]);
DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
peer->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "Packet Type 11AX MCS(0-11):%s",
mcs[4]);
DP_TRACE(NONE, "Packet Type 11AX MCS Invalid = %d",
DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
tx_mcs[4]);
DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
peer->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);
DP_TRACE(NONE, "SGI:"
" 0.8us %d,"
" 0.4us %d,"
" 1.6us %d,"
" 3.2us %d,",
DP_TRACE_STATS(NONE, "SGI = "
" 0.8us %d"
" 0.4us %d"
" 1.6us %d"
" 3.2us %d",
peer->stats.tx.sgi_count[0],
peer->stats.tx.sgi_count[1],
peer->stats.tx.sgi_count[2],
peer->stats.tx.sgi_count[3]);
DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
peer->stats.tx.bw[0], peer->stats.tx.bw[1],
peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
DP_TRACE(NONE, "Aggregation:\n");
DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
DP_TRACE_STATS(NONE, "Aggregation:\n");
DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu = %d",
peer->stats.tx.amsdu_cnt);
DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation = %d\n",
peer->stats.tx.non_amsdu_cnt);
DP_TRACE(NONE, "Node Rx Stats:\n");
DP_TRACE(NONE, "Packets Sent To Stack %d",
DP_TRACE_STATS(NONE, "Node Rx Stats:\n");
DP_TRACE_STATS(NONE, "Packets Sent To Stack = %d",
peer->stats.rx.to_stack.num);
DP_TRACE(NONE, "Bytes Sent To Stack %d",
DP_TRACE_STATS(NONE, "Bytes Sent To Stack = %d",
peer->stats.rx.to_stack.bytes);
DP_TRACE(NONE, "Packets Received %d", peer->stats.rx.rcvd_reo.num);
DP_TRACE(NONE, "Bytes Received %d", peer->stats.rx.rcvd_reo.bytes);
DP_TRACE(NONE, "Unicast Packets Received %d",
peer->stats.rx.unicast.num);
DP_TRACE(NONE, "Unicast Bytes Received %d",
peer->stats.rx.unicast.bytes);
DP_TRACE(NONE, "Multicast Packets Received %d",
for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
DP_TRACE_STATS(NONE, "Packets Received = %d",
peer->stats.rx.rcvd_reo[i].num);
DP_TRACE_STATS(NONE, "Bytes Received = %d",
peer->stats.rx.rcvd_reo[i].bytes);
}
DP_TRACE_STATS(NONE, "Multicast Packets Received = %d",
peer->stats.rx.multicast.num);
DP_TRACE(NONE, "Multicast Bytes Received %d",
DP_TRACE_STATS(NONE, "Multicast Bytes Received = %d",
peer->stats.rx.multicast.bytes);
DP_TRACE(NONE, "WDS Packets Received %d",
DP_TRACE_STATS(NONE, "WDS Packets Received = %d",
peer->stats.rx.wds.num);
DP_TRACE(NONE, "WDS Bytes Received %d",
DP_TRACE_STATS(NONE, "WDS Bytes Received = %d",
peer->stats.rx.wds.bytes);
DP_TRACE(NONE, "Intra BSS Packets Received %d",
peer->stats.rx.intra_bss.num);
DP_TRACE(NONE, "Intra BSS Bytes Received %d",
peer->stats.rx.intra_bss.bytes);
DP_TRACE(NONE, "Raw Packets Received %d",
DP_TRACE_STATS(NONE, "Intra BSS Packets Received = %d",
peer->stats.rx.intra_bss.pkts.num);
DP_TRACE_STATS(NONE, "Intra BSS Bytes Received = %d",
peer->stats.rx.intra_bss.pkts.bytes);
DP_TRACE_STATS(NONE, "Raw Packets Received = %d",
peer->stats.rx.raw.num);
DP_TRACE(NONE, "Raw Bytes Received %d",
DP_TRACE_STATS(NONE, "Raw Bytes Received = %d",
peer->stats.rx.raw.bytes);
DP_TRACE(NONE, "Errors: MIC Errors %d",
DP_TRACE_STATS(NONE, "Errors: MIC Errors = %d",
peer->stats.rx.err.mic_err);
DP_TRACE(NONE, "Erros: Decryption Errors %d",
DP_TRACE_STATS(NONE, "Erros: Decryption Errors = %d",
peer->stats.rx.err.decrypt_err);
DP_TRACE(NONE, "Msdu's Received As Part of Ampdu %d",
DP_TRACE_STATS(NONE, "Msdu's Received As Part of Ampdu = %d",
peer->stats.rx.non_ampdu_cnt);
DP_TRACE(NONE, "Msdu's Recived As Ampdu %d", peer->stats.rx.ampdu_cnt);
DP_TRACE(NONE, "Msdu's Received Not Part of Amsdu's %d",
DP_TRACE_STATS(NONE, "Msdu's Recived As Ampdu = %d",
peer->stats.rx.ampdu_cnt);
DP_TRACE_STATS(NONE, "Msdu's Received Not Part of Amsdu's = %d",
peer->stats.rx.non_amsdu_cnt);
DP_TRACE(NONE, "MSDUs Received As Part of Amsdu %d",
DP_TRACE_STATS(NONE, "MSDUs Received As Part of Amsdu = %d",
peer->stats.rx.amsdu_cnt);
DP_TRACE(NONE, "SGI:"
" 0.8us %d,"
" 0.4us %d,"
" 1.6us %d,"
" 3.2us %d,",
DP_TRACE_STATS(NONE, "SGI ="
" 0.8us %d"
" 0.4us %d"
" 1.6us %d"
" 3.2us %d",
peer->stats.rx.sgi_count[0],
peer->stats.rx.sgi_count[1],
peer->stats.rx.sgi_count[2],
peer->stats.rx.sgi_count[3]);
DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
peer->stats.rx.bw[0], peer->stats.rx.bw[1],
peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
DP_TRACE(NONE, "Reception Type:"
DP_TRACE_STATS(NONE, "Reception Type ="
" SU %d,"
" MU_MIMO %d,"
" MU_OFDMA %d,"
@@ -3096,29 +3188,53 @@ static inline void dp_print_peer_stats(struct dp_peer *peer)
peer->stats.rx.reception_type[2],
peer->stats.rx.reception_type[3]);
index = 0;
for (i = 0; i < MAX_MCS; i++) {
index += qdf_snprint(&mcs_rx[index], DP_MCS_LENGTH - index,
" %d,", peer->stats.rx.mcs_count[i]);
for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
index = 0;
for (i = 0; i < MAX_MCS; i++) {
index += qdf_snprint(&rx_mcs[pkt_type][index],
DP_MCS_LENGTH - index,
" %d ",
peer->stats.rx.pkt_type[pkt_type].
mcs_count[i]);
}
}
DP_TRACE(NONE, "MCS(0-11):%s",
mcs_rx);
DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
rx_mcs[0]);
DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
peer->stats.rx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
rx_mcs[1]);
DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
peer->stats.rx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
rx_mcs[2]);
DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
peer->stats.rx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "11AC MCS(0-9) = %s",
rx_mcs[3]);
DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
peer->stats.rx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
rx_mcs[4]);
DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
peer->stats.rx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);
index = 0;
for (i = 0; i < SS_COUNT; i++) {
index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
" %d,", peer->stats.rx.nss[i]);
" %d", peer->stats.rx.nss[i]);
}
DP_TRACE(NONE, "NSS(0-7):%s",
DP_TRACE_STATS(NONE, "NSS(0-7) = %s\n",
nss);
DP_TRACE(NONE, "Aggregation:\n");
DP_TRACE(NONE, "Number of Msdu's Part of Ampdu = %d",
DP_TRACE_STATS(NONE, "Aggregation:\n");
DP_TRACE_STATS(NONE, "Number of Msdu's Part of Ampdu = %d",
peer->stats.rx.ampdu_cnt);
DP_TRACE(NONE, "Number of Msdu's With No Mpdu Level Aggregation : %d",
DP_TRACE_STATS(NONE, "Number of Msdu's With No Mpdu Level Aggregation = %d",
peer->stats.rx.non_ampdu_cnt);
DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu = %d",
peer->stats.rx.amsdu_cnt);
DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation = %d",
peer->stats.rx.non_amsdu_cnt);
}
@@ -3296,8 +3412,12 @@ static int dp_txrx_stats(struct cdp_vdev *vdev,
/* TODO: Firmware Mapping not implemented */
if (host_stats != TXRX_HOST_STATS_INVALID)
if ((host_stats != TXRX_HOST_STATS_INVALID) &&
(host_stats <= TXRX_HOST_STATS_MAX))
return dp_print_host_stats(vdev, req, host_stats);
else
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"Wrong Input for TxRx Stats");
return 0;
}
@@ -3336,6 +3456,7 @@ static void dp_txrx_path_stats(struct dp_soc *soc)
uint8_t error_code;
uint8_t loop_pdev;
struct dp_pdev *pdev;
uint8_t i;
for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
@@ -3404,12 +3525,13 @@ static void dp_txrx_path_stats(struct dp_soc *soc)
DP_TRACE(NONE, "delivered %u msdus ( %u bytes),",
pdev->stats.rx.to_stack.num,
pdev->stats.rx.to_stack.bytes);
DP_TRACE(NONE, "received on reo %u msdus ( %u bytes),",
pdev->stats.rx.rcvd_reo.num,
pdev->stats.rx.rcvd_reo.bytes);
for (i = 0; i < CDP_MAX_RX_RINGS; i++)
DP_TRACE(NONE, "received on reo[%d] %u msdus ( %u bytes),",
i, pdev->stats.rx.rcvd_reo[i].num,
pdev->stats.rx.rcvd_reo[i].bytes);
DP_TRACE(NONE, "intra-bss packets %u msdus ( %u bytes),",
pdev->stats.rx.intra_bss.num,
pdev->stats.rx.intra_bss.bytes);
pdev->stats.rx.intra_bss.pkts.num,
pdev->stats.rx.intra_bss.pkts.bytes);
DP_TRACE(NONE, "raw packets %u msdus ( %u bytes),",
pdev->stats.rx.raw.num,
pdev->stats.rx.raw.bytes);
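
One more dp_main.c change worth noting: dp_txrx_stats() now range-checks the requested host-stats type against the TXRX_HOST_STATS_MAX enumerator added in the cdp header, instead of only rejecting TXRX_HOST_STATS_INVALID:

    /* Condensed from the dp_txrx_stats() hunk above. */
    if ((host_stats != TXRX_HOST_STATS_INVALID) &&
        (host_stats <= TXRX_HOST_STATS_MAX))
            return dp_print_host_stats(vdev, req, host_stats);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
              "Wrong Input for TxRx Stats");
    return 0;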

View File

@@ -73,7 +73,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
if (!rxdma_srng) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"rxdma srng not initialized");
DP_STATS_INC(dp_pdev, err.rxdma_unitialized, 1);
DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
return QDF_STATUS_E_FAILURE;
}
@@ -95,7 +95,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"no free rx_descs in freelist");
DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
num_alloc_desc);
num_req_buffers);
return QDF_STATUS_E_NOMEM;
}
@@ -127,11 +127,17 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
RX_BUFFER_ALIGNMENT,
FALSE);
if (rx_netbuf == NULL)
if (rx_netbuf == NULL) {
DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
continue;
}
qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
QDF_DMA_BIDIRECTIONAL);
if (ret == QDF_STATUS_E_FAILURE) {
DP_STATS_INC(dp_pdev, replenish.map_err, 1);
continue;
}
paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
@@ -142,8 +148,10 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
* this is a temp WAR till we fix it properly.
*/
ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
if (ret == QDF_STATUS_E_FAILURE)
if (ret == QDF_STATUS_E_FAILURE) {
DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
break;
}
count++;
@@ -153,8 +161,6 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
next = (*desc_list)->next;
(*desc_list)->rx_desc.nbuf = rx_netbuf;
DP_STATS_INC_PKT(dp_pdev, replenished, 1,
qdf_nbuf_len(rx_netbuf));
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"rx_netbuf=%p, buf=%p, paddr=0x%llx, cookie=%d\n",
@@ -175,6 +181,8 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%d rx desc added back to free list", num_desc_to_free);
DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
DP_STATS_INC_PKT(dp_pdev, replenish.pkts, num_req_buffers,
(RX_BUFFER_SIZE * num_req_buffers));
/*
* add any available free desc back to the free list
@@ -210,6 +218,7 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list)
DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
/*
* reset the chfrag_start and chfrag_end bits in nbuf cb
* as this is a non-amsdu pkt and RAW mode simulation expects
@@ -329,10 +338,14 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
len = qdf_nbuf_len(nbuf);
if (!dp_tx_send(sa_peer->vdev, nbuf)) {
DP_STATS_INC_PKT(sa_peer, rx.intra_bss, 1, len);
DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts,
1, len);
return true;
} else
} else {
DP_STATS_INC_PKT(sa_peer, rx.intra_bss.fail, 1,
len);
return false;
}
}
}
/* if it is a broadcast pkt (eg: ARP) and it is not its own
@@ -353,7 +366,7 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
if (dp_tx_send(sa_peer->vdev, nbuf_copy))
qdf_nbuf_free(nbuf_copy);
else
DP_STATS_INC_PKT(sa_peer, rx.intra_bss, 1, len);
DP_STATS_INC_PKT(sa_peer, rx.intra_bss.pkts, 1, len);
}
/* return false as we have to still send the original pkt
* up the stack
@@ -391,6 +404,7 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
if (rx_info == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Memory allocation failed for mesh rx stats");
DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
return;
}
@@ -716,7 +730,7 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
static uint32_t peer_mdata;
uint8_t *rx_tlv_hdr;
uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
uint32_t sgi, rate_mcs, tid, nss, bw, reception_type;
uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
uint64_t vdev_map = 0;
uint8_t mac_id;
uint16_t i, vdev_cnt = 0;
@@ -761,10 +775,12 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
&& quota--)) {
error = HAL_RX_ERROR_STATUS_GET(ring_desc);
ring_id = hal_srng_ring_id_get(hal_ring);
if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("HAL RING 0x%p:error %d"), hal_ring, error);
DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
/* Don't know how to deal with this -- assert */
qdf_assert(0);
}
@@ -784,7 +800,6 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
QDF_DMA_BIDIRECTIONAL);
ring_id = hal_srng_ring_id_get(hal_ring);
core_id = smp_processor_id();
DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
@@ -801,6 +816,7 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
if (!vdev) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("vdev is NULL"));
DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
qdf_nbuf_free(rx_desc->nbuf);
goto fail;
@@ -828,7 +844,7 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
qdf_nbuf_set_chfrag_end(rx_desc->nbuf, 1);
DP_STATS_INC_PKT(peer, rx.rcvd_reo, 1,
DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1,
qdf_nbuf_len(rx_desc->nbuf));
ampdu_flag = (mpdu_desc_info.mpdu_flags &
@@ -897,6 +913,8 @@ done:
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_ERROR,
FL("MSDU DONE failure"));
DP_STATS_INC(vdev->pdev, dropped.msdu_not_done,
1);
hal_rx_dump_pkt_tlvs(rx_tlv_hdr,
QDF_TRACE_LEVEL_INFO);
@@ -931,6 +949,7 @@ done:
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO,
FL("received pkt with same src MAC"));
DP_STATS_INC(vdev->pdev, dropped.mec, 1);
/* Drop & free packet */
qdf_nbuf_free(nbuf);
@@ -939,17 +958,18 @@ done:
}
sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s: %d, SGI: %d, rate_mcs: %d, tid: %d",
__func__, __LINE__, sgi, rate_mcs, tid);
"%s: %d, SGI: %d, tid: %d",
__func__, __LINE__, sgi, tid);
bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
reception_type = hal_rx_msdu_start_reception_type_get(
rx_tlv_hdr);
nss = hal_rx_msdu_start_nss_get(rx_tlv_hdr);
pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
DP_STATS_INC(vdev->pdev, rx.bw[bw], 1);
DP_STATS_INC(vdev->pdev,
@@ -959,7 +979,6 @@ done:
(reception_type == REPT_MU_OFDMA_MIMO))
);
DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
DP_STATS_INC(peer, rx.mcs_count[rate_mcs], 1);
DP_STATS_INCC(peer, rx.err.mic_err, 1,
hal_rx_mpdu_end_mic_err_get(
rx_tlv_hdr));
@@ -973,6 +992,47 @@ done:
DP_STATS_INC(peer, rx.reception_type[reception_type],
1);
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[MAX_MCS], 1,
((mcs >= MAX_MCS_11A) && (pkt_type
== DOT11_A)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[mcs], 1,
((mcs <= MAX_MCS_11A) && (pkt_type
== DOT11_A)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[MAX_MCS], 1,
((mcs >= MAX_MCS_11B)
&& (pkt_type == DOT11_B)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[mcs], 1,
((mcs <= MAX_MCS_11B)
&& (pkt_type == DOT11_B)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[MAX_MCS], 1,
((mcs >= MAX_MCS_11A)
&& (pkt_type == DOT11_N)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[mcs], 1,
((mcs <= MAX_MCS_11A)
&& (pkt_type == DOT11_N)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[MAX_MCS], 1,
((mcs >= MAX_MCS_11AC)
&& (pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[mcs], 1,
((mcs <= MAX_MCS_11AC)
&& (pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[MAX_MCS], 1,
((mcs >= MAX_MCS)
&& (pkt_type == DOT11_AX)));
DP_STATS_INCC(peer, rx.pkt_type[pkt_type].
mcs_count[mcs], 1,
((mcs <= MAX_MCS)
&& (pkt_type == DOT11_AX)));
/*
* HW structures call this L3 header padding --
* even though this is actually the offset from
@@ -998,6 +1058,8 @@ done:
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO_MED,
FL("mesh pkt filtered"));
DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
1);
qdf_nbuf_free(nbuf);
continue;
@@ -1045,30 +1107,27 @@ done:
nbuf);
DP_STATS_INCC_PKT(peer, rx.multicast, 1, pkt_len,
DP_FRAME_IS_MULTICAST((eh)->ether_dhost
));
DP_STATS_INCC_PKT(peer, rx.unicast, 1, pkt_len,
!(DP_FRAME_IS_MULTICAST(
(eh)->ether_dhost)));
hal_rx_msdu_end_da_is_mcbc_get(
rx_tlv_hdr));
DP_STATS_INC_PKT(peer, rx.to_stack, 1,
pkt_len);
if (hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
if (soc->cdp_soc.ol_ops->update_dp_stats)
if (soc->cdp_soc.ol_ops->update_dp_stats) {
soc->cdp_soc.ol_ops->update_dp_stats(
vdev->pdev->osif_pdev,
&peer->stats,
peer_id,
UPDATE_PEER_STATS);
dp_aggregate_vdev_stats(peer->vdev);
dp_aggregate_vdev_stats(peer->vdev);
if (soc->cdp_soc.ol_ops->update_dp_stats)
soc->cdp_soc.ol_ops->update_dp_stats(
vdev->pdev->osif_pdev,
&peer->vdev->stats,
peer->vdev->vdev_id,
UPDATE_VDEV_STATS);
}
}
}
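
The block of DP_STATS_INCC() calls above implements the per-PHY-type MCS histogram: for each received MSDU the reported MCS is counted under rx.pkt_type[pkt_type] when it is within the legal range for that type, and under the mcs_count[MAX_MCS] slot otherwise (that slot is what the "MCS Invalid" prints in dp_main.c read). Pattern for one packet type:

    /* DP_STATS_INCC(handle, field, delta, cond): conditional increment
     * (assumed semantics, matching its use elsewhere in this diff). */
    DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
                  ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
    DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS], 1,
                  ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));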

View File

@@ -380,7 +380,6 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
FL("vdev %p osif_rx %p"), vdev,
vdev->osif_rx);
qdf_nbuf_set_next(nbuf, NULL);
vdev->osif_rx(vdev->osif_vdev, nbuf);
DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
@@ -506,6 +505,7 @@ dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
while (qdf_likely((ring_desc =
hal_srng_dst_get_next(hal_soc, hal_ring))
&& quota--)) {
DP_STATS_INC(soc, rx.err_ring_pkts, 1);
error = HAL_RX_ERROR_STATUS_GET(ring_desc);
@@ -538,11 +538,10 @@ dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
/* TODO */
DP_STATS_INC(soc,
rx.err.reo_error[HAL_MPDU_F_FRAGMENT], 1);
rx_bufs_used += dp_rx_frag_handle(soc,
ring_desc, &mpdu_desc_info,
&head, &tail, quota);
DP_STATS_INC(soc, rx.rx_frags, 1);
continue;
}

View File

@@ -98,7 +98,6 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
soc = pdev->soc;
DP_STATS_INC(tx_desc->vdev, tx_i.freed.num, 1);
if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
@@ -307,8 +306,10 @@ struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
if (!msdu_ext_desc)
if (!msdu_ext_desc) {
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
return NULL;
}
if (qdf_unlikely(vdev->mesh_vdev)) {
qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
@@ -381,6 +382,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
if (QDF_STATUS_E_RESOURCES == status) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Resource Full\n", __func__);
DP_STATS_INC(vdev, tx_i.dropped.res_full, 1);
/* TODO Stop Tx Queues */
}
@@ -390,6 +392,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
if (qdf_unlikely(!tx_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx Desc Alloc Failed\n", __func__);
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
return NULL;
}
@@ -461,6 +464,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
/* Handle failure */
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"qdf_nbuf_map failed\n");
DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
goto failure;
}
@@ -484,9 +488,6 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
return tx_desc;
failure:
DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
dp_tx_desc_release(tx_desc, desc_pool_id);
return NULL;
}
@@ -520,14 +521,17 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
if (QDF_STATUS_E_RESOURCES == status) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Resource Full\n", __func__);
DP_STATS_INC(vdev, tx_i.dropped.res_full, 1);
/* TODO Stop Tx Queues */
}
/* Allocate software Tx descriptor */
tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
if (!tx_desc)
if (!tx_desc) {
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
return NULL;
}
/* Flow control/Congestion Control counters */
qdf_atomic_inc(&pdev->num_tx_outstanding);
@@ -563,9 +567,6 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
return tx_desc;
failure:
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
if (qdf_unlikely(tx_desc->flags & DP_TX_DESC_FLAG_ME))
dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
dp_tx_desc_release(tx_desc, desc_pool_id);
@@ -591,9 +592,12 @@ static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
QDF_DMA_TO_DEVICE)) {
qdf_print("dma map error\n");
DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
qdf_nbuf_free(nbuf);
return NULL;
}
@@ -701,9 +705,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s TCL ring full ring_id:%d\n", __func__, ring_id);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
length);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
hal_srng_access_end(soc->hal_soc,
soc->tcl_data_ring[ring_id].hal_srng);
return QDF_STATUS_E_RESOURCES;
@@ -872,7 +874,6 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_desc prepare Fail vdev %p queue %d\n",
__func__, vdev, tx_q->desc_pool_id);
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
goto fail_return;
}
@@ -901,7 +902,6 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
"%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
__func__, tx_desc, tx_q->ring_id);
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
goto fail_return;
}
@@ -910,8 +910,6 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
return NULL;
fail_return:
DP_STATS_INC_PKT(pdev, tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
return nbuf;
}
@@ -948,9 +946,6 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
"%s %d : HAL RING Access Failed -- %p\n",
__func__, __LINE__, hal_srng);
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
DP_STATS_INC_PKT(vdev,
tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
return nbuf;
}
@@ -981,10 +976,6 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_desc prepare Fail vdev %p queue %d\n",
__func__, vdev, tx_q->desc_pool_id);
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
DP_STATS_INC_PKT(vdev,
tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
@@ -1003,11 +994,6 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
"%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
__func__, tx_desc, tx_q->ring_id);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
DP_STATS_INC_PKT(pdev,
tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
@@ -1093,6 +1079,7 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_DMA_TO_DEVICE)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"dma map error\n");
DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
qdf_nbuf_free(nbuf);
return NULL;
@@ -1108,6 +1095,7 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"frag dma map error\n");
DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
qdf_nbuf_free(nbuf);
return NULL;
}
@@ -1334,6 +1322,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s tso_prepare fail vdev_id:%d\n",
__func__, vdev->vdev_id);
DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
return nbuf;
}
@@ -1382,9 +1371,6 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Raw frame %p\n", __func__, vdev);
DP_STATS_INC_PKT(vdev, tx_i.raw_pkt, 1,
qdf_nbuf_len(nbuf));
goto send_multiple;
}
@@ -1514,8 +1500,6 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
case HTT_TX_FW2WBM_TX_STATUS_TTL:
{
qdf_atomic_dec(&pdev->num_tx_exception);
DP_STATS_INC_PKT(tx_desc->vdev, tx_i.dropped.dropped_pkt,
1, qdf_nbuf_len(tx_desc->nbuf));
DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
break;
}
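
On the Tx side the pattern is the inverse of the Rx additions: the per-site DP_STATS_INC_PKT(..., tx_i.dropped.dropped_pkt, ...) calls are removed and each failure path now bumps only its specific reason counter, with the aggregate packet count reconstructed during pdev aggregation (see dp_aggregate_pdev_stats above). Condensed:

    /* Condensed from the dp_tx.c hunks in this commit. */
    if (QDF_STATUS_E_RESOURCES == status)          /* pool exhausted */
            DP_STATS_INC(vdev, tx_i.dropped.res_full, 1);

    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (!tx_desc) {                                /* no descriptor */
            DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
            return NULL;
    }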

View File

@@ -333,116 +333,6 @@ struct reo_desc_list_node {
struct dp_rx_tid rx_tid;
};
/* TODO: Proper comments have been added in the subsequesnt gerrit */
/* packet info */
struct dp_pkt_info {
uint32_t num; /*no of packets*/
uint32_t bytes; /* total no of bytes */
};
/* per pdev tx stats*/
struct dp_tx_pdev_stats {
struct dp_pkt_info rcvd; /*total packets received for transmission */
struct {
/* Pkt Info for which completions were received */
struct dp_pkt_info comp_pkt;
uint32_t mcs_count[MAX_MCS + 1]; /* MCS Count */
} comp; /* Tx completions received*/
struct dp_pkt_info freed; /* Tx packets freed*/
struct dp_pkt_info processed; /* Tx packets processed*/
struct dp_pkt_info outstanding; /* Tx packets remaining for processing*/
struct {
struct dp_pkt_info dropped_pkt; /* Total packets dropped */
uint32_t desc_total; /* total descriptors dropped */
uint32_t dma_map_error; /* Dropped due to Dma Error */
uint32_t ring_full; /* dropped due to ring full */
uint32_t fw_discard; /* Discarded bu firmware */
uint32_t fw_discard_retired; /* fw_discard_retired */
/* firmware_discard_untransmitted */
uint32_t firmware_discard_untransmitted;
uint32_t mpdu_age_out; /* mpdu_age_out */
uint32_t firmware_discard_reason1; /*firmware_discard_reason1*/
uint32_t firmware_discard_reason2; /*firmware_discard_reason2*/
uint32_t firmware_discard_reason3; /*firmware_discard_reason3*/
} dropped; /* Packets dropped on the Tx side */
struct {
struct dp_pkt_info sg_pkt; /* total scatter gather packets */
uint32_t dropped_host; /* SG packets dropped by host */
uint32_t dropped_target; /* SG packets dropped by target */
} sg; /* Scatter Gather packet info */
struct {
uint32_t num_seg; /* No of segments in TSO packets */
struct dp_pkt_info tso_pkt; /* total no of TSO packets */
uint32_t dropped_host; /* TSO packets dropped by host */
uint32_t dropped_target; /* TSO packets dropped by target */
} tso; /* TSO packets info */
struct {
/* total no of multicast conversion packets */
struct dp_pkt_info mcast_pkt;
/* packets dropped due to map error */
uint32_t dropped_map_error;
/* packets dropped due to self Mac address */
uint32_t dropped_self_mac;
/* Packets dropped due to send fail */
uint32_t dropped_send_fail;
/* total unicast packets transmitted */
uint32_t ucast;
} mcast_en; /* Multicast Enhancement packets info */
/* Total packets passed Reinject handler */
struct dp_pkt_info reinject_pkts;
/* Total packets passed to inspect handler */
struct dp_pkt_info inspect_pkts;
/* Total Raw packets */
struct dp_pkt_info raw_pkt;
};
/* Per pdev RX stats */
struct dp_rx_pdev_stats {
struct dp_pkt_info rcvd_reo; /* packets received on the reo ring */
struct {
/* packets dropped because of no peer */
struct dp_pkt_info no_peer;
/* packets dropped because nsdu_done bit not set */
struct dp_pkt_info msdu_not_done;
} dropped; /* packets dropped on rx */
struct dp_pkt_info replenished; /* total packets replnished */
struct dp_pkt_info to_stack; /* total packets sent up the stack */
struct dp_pkt_info intra_bss; /* Intra BSS packets received */
struct dp_pkt_info wds; /* WDS packets received */
struct dp_pkt_info desc;
struct dp_pkt_info buff;
struct dp_pkt_info raw; /* Raw Pakets received */
struct {
uint32_t rxdma_unitialized; /* rxdma_unitialized errors */
uint32_t desc_alloc_fail; /* desc alloc failed errors */
} err; /* Rx errors */
uint32_t buf_freelist; /* buffers added back in freelist */
uint32_t mcs_count[MAX_MCS + 1]; /* packets in different MCS rates */
uint32_t sgi_count[MAX_MCS + 1]; /* SGI count */
/* Number of MSDUs with no MPDU level aggregation */
uint32_t non_ampdu_cnt;
/* Number of MSDUs part of AMSPU */
uint32_t ampdu_cnt;
/* Number of MSDUs with no MSDU level aggregation */
uint32_t non_amsdu_cnt;
/* Number of MSDUs part of AMSDU*/
uint32_t amsdu_cnt;
/* Packet count in spatiel Streams */
uint32_t nss[SS_COUNT];
/* Packet count in different Bandwidths */
uint32_t bw[SUPPORTED_BW];
/* reception type os packets */
uint32_t reception_type[SUPPORTED_RECEPTION_TYPES];
};
struct dp_ast_entry {
uint16_t ast_idx;
uint8_t mac_addr[DP_MAC_ADDR_LEN];
@@ -643,6 +533,10 @@ struct dp_soc {
/* SOC level RX stats */
struct {
/* Rx errors */
/* Total Packets in Rx Error ring */
uint32_t err_ring_pkts;
/* No of Fragments */
uint32_t rx_frags;
struct {
/* Invalid RBM error count */
uint32_t invalid_rbm;
@@ -658,6 +552,8 @@ struct dp_soc {
uint32_t rxdma_error[MAX_RXDMA_ERRORS];
/* REO Error count */
uint32_t reo_error[REO_ERROR_TYPE_MAX];
/* HAL REO ERR Count */
uint32_t hal_reo_error[CDP_MAX_RX_RINGS];
} err;
/* packet count per core - per ring */
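
dp_types.h drops the duplicated per-pdev stats structures (presumably in favour of the cdp_*_stats definitions extended earlier in this commit) and adds a few SOC-level counters. The new counters are fed from the Rx paths shown above, roughly:

    /* Condensed from dp_rx_process()/dp_rx_err_process() in this commit. */
    if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED))
            DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);

    DP_STATS_INC(soc, rx.err_ring_pkts, 1);  /* each error-ring entry */
    DP_STATS_INC(soc, rx.rx_frags, 1);       /* fragment handled */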