qcacmn: Add Host Lithium Stats

Add per peer stats
Add support to update APSTATS
Change QDF_TRACE to DP_TRACE for improved readability

Change-Id: I4be2adf2a1d7f978eb2961a544435d525c97923d
CRs-Fixed: 1114641
This commit is contained in:
Ishank Jain
2017-02-17 15:38:39 +05:30
committed by Sandeep Puligilla
parent c0ce628e49
commit 1e7401cbc7
12 changed files with 1412 additions and 571 deletions

View File

@@ -382,4 +382,22 @@ static inline int cdp_delba_process(ol_txrx_soc_handle soc,
return 0;
}
/**
* cdp_get_peer_mac_addr_frm_id: function to return vdev id and peer
* mac address
* @soc: SOC handle
* @peer_id: peer id of the peer for which mac_address is required
* @mac_addr: reference to mac address
*
* Return: vdev_id of the vap
*/
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
uint8_t *mac_addr)
{
if (soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
peer_id, mac_addr);
return CDP_INVALID_VDEV_ID;
}
#endif /* _CDP_TXRX_CMN_H_ */
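A caller that only knows a peer id can recover both the peer MAC and the owning vdev id through this wrapper. A minimal usage sketch, assuming a valid soc handle; the helper name below is illustrative and not part of this change:

static void example_resolve_peer(ol_txrx_soc_handle soc, uint16_t peer_id)
{
	uint8_t mac[6];		/* 6-byte MAC address filled by the call */
	uint8_t vdev_id;

	vdev_id = cdp_get_peer_mac_addr_frm_id(soc, peer_id, mac);
	if (vdev_id == CDP_INVALID_VDEV_ID)
		return;		/* peer not found or op not registered */

	/* mac[] now holds the peer MAC; vdev_id identifies the vap */
}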

View File

@@ -42,6 +42,33 @@
#endif
#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
#define CDP_INVALID_VDEV_ID 0xff
#define MAX_MCS 12
#define MAX_MCS_11A 8
#define MAX_MCS_11B 7
#define MAX_MCS_11AC 10
#define SS_COUNT 8
#define SUPPORTED_BW 4
#define SUPPORTED_RECEPTION_TYPES 4
/* WME stream classes */
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */
#define WME_AC_MAX 4 /* MAX AC Value */
#define WME_AC_TO_TID(_ac) ( \
((_ac) == WME_AC_VO) ? 6 : \
((_ac) == WME_AC_VI) ? 5 : \
((_ac) == WME_AC_BK) ? 1 : \
0)
#define TID_TO_WME_AC(_tid) ( \
(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
WME_AC_VO)
/*
* htt_dbg_stats_type -
@@ -405,4 +432,252 @@ struct cdp_soc_t {
#define PER_VDEV_FW_STATS_REQUEST 1
/* Different Packet Types */
enum cdp_packet_type {
DOT11_A = 0,
DOT11_B = 1,
DOT11_N = 2,
DOT11_AC = 3,
DOT11_AX = 4,
DOT11_MAX = 5,
};
/*
* Different Stat update types sent to OL_IF
* @UPDATE_PEER_STATS: update peer stats
* @UPDATE_VDEV_STATS: update vdev stats
* @UPDATE_PDEV_STATS: update pdev stats
*/
enum cdp_stat_update_type {
UPDATE_PEER_STATS = 0,
UPDATE_VDEV_STATS = 1,
UPDATE_PDEV_STATS = 2,
};
/* packet info */
struct cdp_pkt_info {
/*no of packets*/
uint32_t num;
/* total no of bytes */
uint32_t bytes;
};
/* Tx Stats */
struct cdp_tx_stats {
/* Pkt Info for which completions were received */
struct cdp_pkt_info comp_pkt;
/* Unicast Packet Count */
struct cdp_pkt_info ucast;
/* Multicast Packet Count */
struct cdp_pkt_info mcast;
/* Successful Tx Packets */
struct cdp_pkt_info tx_success;
/* Total Tx failure */
uint32_t tx_failed;
/* Total packets transmitted as ofdma */
uint32_t ofdma;
/* Packets in STBC */
uint32_t stbc;
/* Packets in LDPC */
uint32_t ldpc;
/* Packet retries */
uint32_t retries;
/* Number of MSDUs with no MSDU level aggregation */
uint32_t non_amsdu_cnt;
/* Number of MSDUs part of AMSDU*/
uint32_t amsdu_cnt;
/* RSSI of last packet */
uint32_t last_ack_rssi;
/* Packet Type */
struct {
/* MCS Count */
uint32_t mcs_count[MAX_MCS + 1];
} pkt_type[DOT11_MAX];
/* SGI count */
uint32_t sgi_count[MAX_MCS + 1];
/* Packet Count for different bandwidths */
uint32_t bw[SUPPORTED_BW];
/* Wireless Multimedia type Count */
uint32_t wme_ac_type[WME_AC_MAX];
/* Excess retries per Wireless Multimedia type */
uint32_t excess_retries_ac[WME_AC_MAX];
/* Packets dropped on the Tx side */
struct {
/* Dropped due to DMA mapping error */
uint32_t dma_map_error;
/* dropped due to ring full */
uint32_t ring_full;
/* Discarded by firmware */
uint32_t fw_discard;
/* fw_discard_retired */
uint32_t fw_discard_retired;
/* firmware_discard_untransmitted */
uint32_t fw_discard_untransmitted;
/* mpdu_age_out */
uint32_t mpdu_age_out;
/* firmware_discard_reason1 */
uint32_t fw_discard_reason1;
/* firmware_discard_reason2 */
uint32_t fw_discard_reason2;
/* firmware_discard_reason3 */
uint32_t fw_discard_reason3;
} dropped;
};
/* Rx Level Stats */
struct cdp_rx_stats {
/* Total packets sent up the stack */
struct cdp_pkt_info to_stack;
/* Packets received on the reo ring */
struct cdp_pkt_info rcvd_reo;
/* Total unicast packets */
struct cdp_pkt_info unicast;
/* Total multicast packets */
struct cdp_pkt_info multicast;
/* WDS packets received */
struct cdp_pkt_info wds;
/* Intra BSS packets received */
struct cdp_pkt_info intra_bss;
/* Raw packets received */
struct cdp_pkt_info raw;
/* Errors */
struct {
/* Rx MIC errors */
uint32_t mic_err;
/* Rx Decryption Errors */
uint32_t decrypt_err;
} err;
/* Wireless Multimedia type Count */
uint32_t wme_ac_type[WME_AC_MAX];
/* Reception type of packets */
uint32_t reception_type[SUPPORTED_RECEPTION_TYPES];
/* packets in different MCS rates */
uint32_t mcs_count[MAX_MCS + 1];
/* SGI count */
uint32_t sgi_count[MAX_MCS + 1];
/* Packet count in spatial streams */
uint32_t nss[SS_COUNT];
/* Packet Count in different bandwidths */
uint32_t bw[SUPPORTED_BW];
/* Number of MSDUs with no MPDU level aggregation */
uint32_t non_ampdu_cnt;
/* Number of MSDUs part of AMPDU */
uint32_t ampdu_cnt;
/* Number of MSDUs with no MSDU level aggregation */
uint32_t non_amsdu_cnt;
/* Number of MSDUs part of AMSDU*/
uint32_t amsdu_cnt;
};
/* Tx ingress Stats */
struct cdp_tx_ingress_stats {
/* Total packets received for transmission */
struct cdp_pkt_info rcvd;
/* Tx packets freed*/
struct cdp_pkt_info freed;
/* Tx packets processed*/
struct cdp_pkt_info processed;
/* Total packets passed Reinject handler */
struct cdp_pkt_info reinject_pkts;
/* Total packets passed to inspect handler */
struct cdp_pkt_info inspect_pkts;
/* Total Raw packets */
struct cdp_pkt_info raw_pkt;
/* TSO packets info */
struct {
/* No of segments in TSO packets */
uint32_t num_seg;
/* total no of TSO packets */
struct cdp_pkt_info tso_pkt;
/* TSO packets dropped by host */
uint32_t dropped_host;
/* TSO packets dropped by target */
uint32_t dropped_target;
} tso;
/* Scatter Gather packet info */
struct {
/* Total scatter gather packets */
struct cdp_pkt_info sg_pkt;
/* SG packets dropped by host */
uint32_t dropped_host;
/* SG packets dropped by target */
uint32_t dropped_target;
} sg;
/* Multicast Enhancement packets info */
struct {
/* total no of multicast conversion packets */
struct cdp_pkt_info mcast_pkt;
/* packets dropped due to map error */
uint32_t dropped_map_error;
/* packets dropped due to self Mac address */
uint32_t dropped_self_mac;
/* Packets dropped due to send fail */
uint32_t dropped_send_fail;
/* total unicast packets transmitted */
uint32_t ucast;
} mcast_en;
/* Packets dropped on the Tx side */
struct {
/* Total packets dropped */
struct cdp_pkt_info dropped_pkt;
/* Desc Not Available */
uint32_t desc_na;
/* Ring Full */
uint32_t ring_full;
/* HW enqueue failed */
uint32_t enqueue_fail;
/* DMA failed */
uint32_t dma_error;
} dropped;
};
struct cdp_vdev_stats {
/* Tx ingress stats */
struct cdp_tx_ingress_stats tx_i;
/* CDP Tx Stats */
struct cdp_tx_stats tx;
/* CDP Rx Stats */
struct cdp_rx_stats rx;
};
struct cdp_peer_stats {
/* CDP Tx Stats */
struct cdp_tx_stats tx;
/* CDP Rx Stats */
struct cdp_rx_stats rx;
};
struct cdp_pdev_stats {
/* packets dropped on rx */
struct {
/* packets dropped because of no peer */
struct cdp_pkt_info no_peer;
/* packets dropped because msdu_done bit not set */
struct cdp_pkt_info msdu_not_done;
} dropped;
/* total packets replenished */
struct cdp_pkt_info replenished;
/* Rx errors */
struct {
/* rxdma_unitialized errors */
uint32_t rxdma_unitialized;
/* desc alloc failed errors */
uint32_t desc_alloc_fail;
} err;
/* buffers added back in freelist */
uint32_t buf_freelist;
/* Tx Ingress stats */
struct cdp_tx_ingress_stats tx_i;
/* CDP Tx Stats */
struct cdp_tx_stats tx;
/* CDP Rx Stats */
struct cdp_rx_stats rx;
};
#endif
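The TID_TO_WME_AC() mapping above is what lets the per-peer counters index wme_ac_type[] safely: any TID 0-7 collapses to one of the four ACs, which is always below WME_AC_MAX. A small sketch of the intent, assuming a cdp_tx_stats instance; the DP code itself does this through DP_STATS_INC():

static void example_count_ac(struct cdp_tx_stats *stats, uint8_t tid)
{
	uint8_t ac = TID_TO_WME_AC(tid);	/* TID 0-7 -> BE/BK/VI/VO */

	stats->wme_ac_type[ac]++;		/* ac is always < WME_AC_MAX */
}

For the reverse direction, WME_AC_TO_TID(WME_AC_VI) yields TID 5, matching the table encoded in the macro.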

View File

@@ -183,6 +183,9 @@ struct cdp_cmn_ops {
int (*delba_process)(void *peer_handle,
int tid, uint16_t reasoncode);
uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
uint16_t peer_id, uint8_t *mac_addr);
};
struct cdp_ctrl_ops {
@@ -539,6 +542,9 @@ struct ol_if_ops {
uint8_t *wds_macaddr);
QDF_STATUS (*lro_hash_config)(void *scn_handle,
struct cdp_lro_hash_config *lro_hash);
void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
uint8_t type);
/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
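The update_dp_stats callback added here is the hook through which the DP layer pushes the cdp stats structures up to the OL_IF/WMA layer (for example to feed APSTATS). A hedged sketch of what a registrant might look like; only the enum values and stats structures come from this change, the function itself and its registration live outside qcacmn:

static void example_update_dp_stats(void *pdev, void *stats, uint16_t id,
				    uint8_t type)
{
	switch (type) {
	case UPDATE_PEER_STATS:
		/* stats points to struct cdp_peer_stats, id is the peer_id */
		break;
	case UPDATE_VDEV_STATS:
		/* stats points to struct cdp_vdev_stats, id is the vdev_id */
		break;
	case UPDATE_PDEV_STATS:
		/* stats points to struct cdp_pdev_stats, id is the pdev id */
		break;
	default:
		break;
	}
}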

View File

@@ -140,7 +140,7 @@ struct ol_txrx_stats {
ol_txrx_stats_elem udp_ipv6_cksum_err;
} rx;
struct {
/* Number of mcast recieved for conversion */
/* Number of mcast received for conversion */
u_int32_t num_me_rcvd;
/* Number of unicast sent as part of mcast conversion */
u_int32_t num_me_ucast;

View File

@@ -55,7 +55,7 @@ while (0)
#define DP_TRACE(LVL, fmt, args ...) \
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_##LVL, \
"%s:%d: "fmt, __func__, __LINE__, ## args)
fmt, ## args)
#define DP_STATS_INIT(_handle) \
qdf_mem_set(&((_handle)->stats), sizeof((_handle)->stats), 0x0)
@@ -83,7 +83,7 @@ while (0)
#define DP_STATS_UPD(_handle, _field, _delta) \
{ \
_handle->stats._field = _delta \
_handle->stats._field = _delta; \
}
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes) \
@@ -91,14 +91,35 @@ while (0)
DP_STATS_INC(_handle, _field.num, _count); \
DP_STATS_INC(_handle, _field.bytes, _bytes) \
}
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond) \
{ \
DP_STATS_INCC(_handle, _field.num, _count, _cond); \
DP_STATS_INCC(_handle, _field.bytes, _bytes, _cond) \
}
#define DP_STATS_AGGR(_handle_a, _handle_b, _field) \
{ \
_handle_a->stats._field += _handle_b->stats._field; \
}
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field) \
{ \
DP_STATS_AGGR(_handle_a, _handle_b, _field.num); \
DP_STATS_AGGR(_handle_a, _handle_b, _field.bytes);\
}
#else
#define DP_STATS_INC(_handle, _field, _delta)
#define DP_STATS_INCC(_handle, _field, _delta, _cond)
#define DP_STATS_DEC(_handle, _field, _delta)
#define DP_STATS_UPD(_handle, _field, _delta)
#define DP_STATS_INC_PKT(_handle, _field, _count, _bytes)
#define DP_STATS_INCC_PKT(_handle, _field, _count, _bytes, _cond)
#define DP_STATS_AGGR(_handle_a, _handle_b, _field)
#define DP_STATS_AGGR_PKT(_handle_a, _handle_b, _field)
#endif
extern int dp_peer_find_attach(struct dp_soc *soc);
extern void dp_peer_find_detach(struct dp_soc *soc);
extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
@@ -151,13 +172,5 @@ extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
void (*callback_fn), void *data);
extern void dp_reo_status_ring_handler(struct dp_soc *soc);
int dp_print_host_stats(struct cdp_vdev *vdev_handle,
struct ol_txrx_stats_req *req, enum cdp_host_txrx_stats type);
void dp_print_pdev_tx_stats(struct dp_pdev *pdev);
void dp_print_pdev_rx_stats(struct dp_pdev *pdev);
void dp_print_soc_tx_stats(struct dp_soc *soc);
void dp_print_soc_rx_stats(struct dp_soc *soc);
void dp_txrx_host_stats_clr(struct dp_vdev *vdev);
void dp_print_rx_rates(struct dp_vdev *vdev);
void dp_print_tx_rates(struct dp_vdev *vdev);
void dp_aggregate_vdev_stats(struct dp_vdev *vdev);
#endif /* #ifndef _DP_INTERNAL_H_ */
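The DP_STATS_* macros above all assume the handle embeds a stats member, so the same macro works on a peer, vdev or pdev; the _PKT variants bump the num/bytes pair of a cdp_pkt_info, and the _AGGR variants fold one handle's counter into another, which is how dp_aggregate_vdev_stats() rolls per-peer stats into the vdev. A short usage sketch (the function name is illustrative):

static void example_stats_usage(struct dp_peer *peer, struct dp_vdev *vdev,
				uint32_t pkt_len)
{
	/* one more unicast completion of pkt_len bytes on this peer */
	DP_STATS_INC_PKT(peer, tx.ucast, 1, pkt_len);

	/* conditional variant: increments only when the condition holds */
	DP_STATS_INCC(peer, tx.stbc, 1, 1 /* frame used STBC */);

	/* fold the peer counter into the vdev counter, as
	 * dp_aggregate_vdev_stats() does for every peer of the vdev */
	DP_STATS_AGGR_PKT(vdev, peer, tx.ucast);
}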

File diff suppressed because it is too large

View File

@@ -1361,3 +1361,31 @@ void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
#endif
/**
* dp_get_peer_mac_addr_frm_id(): get mac address of the peer
* @soc_handle: DP SOC handle
* @peer_id: peer_id of the peer
* @peer_mac: pointer filled with the peer MAC address
*
* Return: vdev_id of the vap
*/
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
uint16_t peer_id, uint8_t *peer_mac)
{
struct dp_soc *soc = (struct dp_soc *)soc_handle;
struct dp_peer *peer;
peer = dp_peer_find_by_id(soc, peer_id);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"soc %p peer_id %d", soc, peer_id);
if (!peer) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"peer not found ");
return CDP_INVALID_VDEV_ID;
}
qdf_mem_copy(peer_mac, peer->mac_addr.raw, 6);
return peer->vdev->vdev_id;
}

View File

@@ -51,5 +51,7 @@ void dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id);
void dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
enum htt_sec_type sec_type, int is_unicast,
u_int32_t *michael_key, u_int32_t *rx_pn);
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
uint16_t peer_id, uint8_t *peer_mac);
#endif /* _DP_PEER_H_ */

View File

@@ -64,7 +64,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
if (!rxdma_srng) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"rxdma srng not initialized");
DP_STATS_INC(dp_pdev, rx.err.rxdma_unitialized, 1);
DP_STATS_INC(dp_pdev, err.rxdma_unitialized, 1);
return QDF_STATUS_E_FAILURE;
}
@@ -83,7 +83,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
if (!num_alloc_desc) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"no free rx_descs in freelist");
DP_STATS_INC(dp_pdev, rx.err.desc_alloc_fail,
DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
num_alloc_desc);
return QDF_STATUS_E_NOMEM;
}
@@ -142,7 +142,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
next = (*desc_list)->next;
(*desc_list)->rx_desc.nbuf = rx_netbuf;
DP_STATS_INC_PKT(dp_pdev, rx.replenished, 1,
DP_STATS_INC_PKT(dp_pdev, replenished, 1,
qdf_nbuf_len(rx_netbuf));
hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
(*desc_list)->rx_desc.cookie,
@@ -157,7 +157,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
"successfully replenished %d buffers", num_req_buffers);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%d rx desc added back to free list", num_desc_to_free);
DP_STATS_INC(dp_pdev, rx.buf_freelist, num_desc_to_free);
DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
/*
* add any available free desc back to the free list
@@ -230,6 +230,8 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
uint8_t *rx_tlv_hdr,
qdf_nbuf_t nbuf)
{
DP_STATS_INC_PKT(sa_peer, rx.intra_bss, 1,
qdf_nbuf_len(nbuf));
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Intra-BSS forwarding not implemented"));
return false;
@@ -411,6 +413,7 @@ dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
uint8_t mac_id;
uint16_t i, vdev_cnt = 0;
uint32_t ampdu_flag, amsdu_flag;
struct ether_header *eh;
/* Debug -- Remove later */
qdf_assert(soc && hal_ring);
@@ -512,7 +515,7 @@ dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
qdf_nbuf_set_chfrag_end(rx_desc->nbuf, 1);
DP_STATS_INC_PKT(vdev->pdev, rx.rcvd_reo, 1,
DP_STATS_INC_PKT(peer, rx.rcvd_reo, 1,
qdf_nbuf_len(rx_desc->nbuf));
ampdu_flag = (mpdu_desc_info.mpdu_flags &
@@ -562,6 +565,7 @@ done:
vdev = vdev_list[i];
while ((nbuf = qdf_nbuf_queue_remove(&vdev->rxq))) {
rx_tlv_hdr = qdf_nbuf_data(nbuf);
eh = (struct ether_header *)qdf_nbuf_data(nbuf);
/*
* Check if DMA completed -- msdu_done is the last bit
@@ -628,10 +632,23 @@ done:
DP_STATS_INC(vdev->pdev,
rx.reception_type[reception_type], 1);
DP_STATS_INCC(vdev->pdev, rx.nss[nss], 1,
((reception_type ==
RECEPTION_TYPE_MU_MIMO) ||
(reception_type ==
RECEPTION_TYPE_MU_OFDMA_MIMO)));
((reception_type == REPT_MU_MIMO) ||
(reception_type == REPT_MU_OFDMA_MIMO))
);
DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
DP_STATS_INC(peer, rx.mcs_count[rate_mcs], 1);
DP_STATS_INCC(peer, rx.err.mic_err, 1,
hal_rx_mpdu_end_mic_err_get(
rx_tlv_hdr));
DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
hal_rx_mpdu_end_decrypt_err_get(
rx_tlv_hdr));
DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)],
1);
DP_STATS_INC(peer, rx.bw[bw], 1);
DP_STATS_INC(peer, rx.reception_type[reception_type],
1);
/*
* HW structures call this L3 header padding --
@@ -693,7 +710,33 @@ done:
DP_RX_LIST_APPEND(deliver_list_head,
deliver_list_tail,
nbuf);
DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
DP_STATS_INCC_PKT(peer, rx.multicast, 1, pkt_len,
DP_FRAME_IS_MULTICAST((eh)->ether_dhost
));
DP_STATS_INCC_PKT(peer, rx.unicast, 1, pkt_len,
!(DP_FRAME_IS_MULTICAST(
(eh)->ether_dhost)));
DP_STATS_INC_PKT(peer, rx.to_stack, 1,
pkt_len);
if (hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
if (soc->cdp_soc.ol_ops->update_dp_stats)
soc->cdp_soc.ol_ops->update_dp_stats(
vdev->pdev->osif_pdev,
&peer->stats,
peer_id,
UPDATE_PEER_STATS);
dp_aggregate_vdev_stats(peer->vdev);
if (soc->cdp_soc.ol_ops->update_dp_stats)
soc->cdp_soc.ol_ops->update_dp_stats(
vdev->pdev->osif_pdev,
&peer->vdev->stats,
peer->vdev->vdev_id,
UPDATE_VDEV_STATS);
}
}
if (qdf_unlikely(vdev->rx_decap_type == htt_pkt_type_raw))
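The delivery loop above now classifies every frame handed to the stack by its Ethernet destination address and charges it to the owning peer. The idea behind the DP_STATS_INCC_PKT pair, as a standalone sketch (the helper name is illustrative; DP_FRAME_IS_MULTICAST and the qdf_nbuf accessors are the ones used in the diff):

static void example_rx_classify(struct dp_peer *peer, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	uint32_t pkt_len = qdf_nbuf_len(nbuf);

	if (DP_FRAME_IS_MULTICAST(eh->ether_dhost))
		DP_STATS_INC_PKT(peer, rx.multicast, 1, pkt_len);
	else
		DP_STATS_INC_PKT(peer, rx.unicast, 1, pkt_len);

	/* everything delivered to the OS also counts towards to_stack */
	DP_STATS_INC_PKT(peer, rx.to_stack, 1, pkt_len);
}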

View File

@@ -19,6 +19,7 @@
#include "htt.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
@@ -121,7 +122,7 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
soc = pdev->soc;
DP_STATS_INC(pdev, tx.freed.num, 1);
DP_STATS_INC(tx_desc->vdev, tx_i.freed.num, 1);
if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
@@ -353,8 +354,9 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
return tx_desc;
failure:
DP_STATS_INC_PKT(pdev, tx.dropped.dropped_pkt, 1,
DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
dp_tx_desc_release(tx_desc, desc_pool_id);
return NULL;
}
@@ -430,7 +432,8 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
return tx_desc;
failure:
DP_STATS_INC_PKT(pdev, tx.dropped.dropped_pkt, 1,
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
dp_tx_desc_release(tx_desc, desc_pool_id);
return NULL;
@@ -508,7 +511,6 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
void *hal_tx_desc, *hal_tx_desc_cached;
qdf_dma_addr_t dma_addr;
uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
/* Return Buffer Manager ID */
uint8_t bm_id = ring_id;
@@ -579,7 +581,8 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s TCL ring full ring_id:%d\n", __func__, ring_id);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
DP_STATS_INC_PKT(pdev, tx.dropped.dropped_pkt, 1,
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
length);
hal_srng_access_end(soc->hal_soc,
soc->tcl_data_ring[ring_id].hal_srng);
@@ -589,7 +592,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
DP_STATS_INC_PKT(pdev, tx.processed, 1, length);
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
return QDF_STATUS_SUCCESS;
}
@@ -639,6 +642,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_desc prepare Fail vdev %p queue %d\n",
__func__, vdev, tx_q->desc_pool_id);
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
goto fail_return;
}
@@ -646,6 +650,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %p\n",
__func__, __LINE__, hal_srng);
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
goto fail_return;
}
@@ -658,6 +663,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
"%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
__func__, tx_desc, tx_q->ring_id);
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
goto fail_return;
}
@@ -666,7 +672,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
return NULL;
fail_return:
DP_STATS_INC_PKT(pdev, tx.dropped.dropped_pkt, 1,
DP_STATS_INC_PKT(pdev, tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(nbuf));
return nbuf;
}
@@ -703,6 +709,10 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %p\n",
__func__, __LINE__, hal_srng);
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
DP_STATS_INC_PKT(vdev,
tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(tx_desc->nbuf));
return nbuf;
}
@@ -724,6 +734,10 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_desc prepare Fail vdev %p queue %d\n",
__func__, vdev, tx_q->desc_pool_id);
DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
DP_STATS_INC_PKT(vdev,
tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(tx_desc->nbuf));
goto done;
}
@@ -739,8 +753,9 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
"%s Tx_hw_enqueue Fail tx_desc %p queue %d\n",
__func__, tx_desc, tx_q->ring_id);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
DP_STATS_INC_PKT(pdev,
tx.dropped.dropped_pkt, 1,
tx_i.dropped.dropped_pkt, 1,
qdf_nbuf_len(tx_desc->nbuf));
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
goto done;
@@ -960,7 +975,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
* (TID override disabled)
*/
msdu_info.tid = HTT_TX_EXT_TID_INVALID;
DP_STATS_INC_PKT(vdev->pdev, tx.rcvd, 1, qdf_nbuf_len(nbuf));
DP_STATS_INC_PKT(vdev->pdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
if (qdf_unlikely(vdev->mesh_vdev))
dp_tx_extract_mesh_meta_data(vdev, nbuf, &msdu_info);
@@ -1006,7 +1021,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
/* dp_tx_prepare_tso(vdev, nbuf, &seg_info, &msdu_info); */
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s TSO frame %p\n", __func__, vdev);
DP_STATS_INC_PKT(vdev->pdev, tx.tso.tso_pkt, 1,
DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
qdf_nbuf_len(nbuf));
goto send_multiple;
@@ -1019,7 +1034,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s non-TSO SG frame %p\n", __func__, vdev);
DP_STATS_INC_PKT(vdev->pdev, tx.sg.sg_pkt, 1,
DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
qdf_nbuf_len(nbuf));
goto send_multiple;
@@ -1033,8 +1048,8 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Mcast frm for ME %p\n", __func__, vdev);
DP_STATS_INC_PKT(vdev->pdev,
tx.mcast_en.mcast_pkt, 1,
DP_STATS_INC_PKT(vdev,
tx_i.mcast_en.mcast_pkt, 1,
qdf_nbuf_len(nbuf));
goto send_multiple;
@@ -1050,8 +1065,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Raw frame %p\n", __func__, vdev);
DP_STATS_INC_PKT(vdev->pdev,
tx.raw_pkt, 1,
DP_STATS_INC_PKT(vdev, tx_i.raw_pkt, 1,
qdf_nbuf_len(nbuf));
goto send_multiple;
@@ -1097,7 +1111,7 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx reinject path\n", __func__);
DP_STATS_INC_PKT(vdev->pdev, tx.reinject_pkts, 1,
DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
qdf_nbuf_len(tx_desc->nbuf));
if (qdf_unlikely(vdev->mesh_vdev)) {
@@ -1132,7 +1146,7 @@ static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
soc = pdev->soc;
DP_STATS_INC_PKT(pdev, tx.inspect_pkts, 1,
DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
qdf_nbuf_len(tx_desc->nbuf));
DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
@@ -1173,7 +1187,7 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
case HTT_TX_FW2WBM_TX_STATUS_TTL:
{
qdf_atomic_dec(&pdev->num_tx_exception);
DP_STATS_INC_PKT(pdev, tx.dropped.dropped_pkt,
DP_STATS_INC_PKT(tx_desc->vdev, tx_i.dropped.dropped_pkt,
1, qdf_nbuf_len(tx_desc->nbuf));
DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
break;
@@ -1233,12 +1247,18 @@ void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
/**
* dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
* @tx_desc: software descriptor head pointer
* @length: packet length
*
* Return: none
*/
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc)
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
uint32_t length)
{
struct hal_tx_completion_status ts;
struct dp_soc *soc = NULL;
struct dp_vdev *vdev = tx_desc->vdev;
struct dp_peer *peer = NULL;
uint8_t comp_status = 0;
qdf_mem_zero(&ts, sizeof(struct hal_tx_completion_status));
hal_tx_comp_get_status(&tx_desc->comp, &ts);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
@@ -1273,8 +1293,114 @@ static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc)
if (qdf_unlikely(tx_desc->vdev->mesh_vdev))
dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);
if (!vdev) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"invalid vdev");
goto fail;
}
soc = tx_desc->vdev->pdev->soc;
peer = dp_peer_find_by_id(soc, ts.peer_id);
if (!peer) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"invalid peer");
DP_STATS_INC_PKT(vdev->pdev, dropped.no_peer, 1, length);
goto out;
}
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
hal_tx_comp_get_buffer_source(&tx_desc->comp)) {
comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
DP_STATS_INCC(peer, tx.dropped.mpdu_age_out, 1,
(comp_status == HAL_TX_TQM_RR_REM_CMD_AGED));
DP_STATS_INCC(peer, tx.dropped.fw_discard_reason1, 1,
(comp_status == HAL_TX_TQM_RR_FW_REASON1));
DP_STATS_INCC(peer, tx.dropped.fw_discard_reason2, 1,
(comp_status == HAL_TX_TQM_RR_FW_REASON2));
DP_STATS_INCC(peer, tx.dropped.fw_discard_reason3, 1,
(comp_status == HAL_TX_TQM_RR_FW_REASON3));
DP_STATS_INCC(peer, tx.tx_failed, 1,
comp_status != HAL_TX_TQM_RR_FRAME_ACKED);
if (comp_status == HAL_TX_TQM_RR_FRAME_ACKED) {
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[MAX_MCS], 1,
((ts.mcs >= MAX_MCS_11A) && (ts.pkt_type
== DOT11_A)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[ts.mcs], 1,
((ts.mcs <= MAX_MCS_11A) && (ts.pkt_type
== DOT11_A)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[MAX_MCS], 1,
((ts.mcs >= MAX_MCS_11B)
&& (ts.pkt_type == DOT11_B)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[ts.mcs], 1,
((ts.mcs <= MAX_MCS_11B)
&& (ts.pkt_type == DOT11_B)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[MAX_MCS], 1,
((ts.mcs >= MAX_MCS_11A)
&& (ts.pkt_type == DOT11_N)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[ts.mcs], 1,
((ts.mcs <= MAX_MCS_11A)
&& (ts.pkt_type == DOT11_N)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[MAX_MCS], 1,
((ts.mcs >= MAX_MCS_11AC)
&& (ts.pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[ts.mcs], 1,
((ts.mcs <= MAX_MCS_11AC)
&& (ts.pkt_type == DOT11_AC)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[MAX_MCS], 1,
((ts.mcs >= MAX_MCS)
&& (ts.pkt_type == DOT11_AX)));
DP_STATS_INCC(peer, tx.pkt_type[ts.pkt_type].
mcs_count[ts.mcs], 1,
((ts.mcs <= MAX_MCS)
&& (ts.pkt_type == DOT11_AX)));
DP_STATS_INC(peer, tx.sgi_count[ts.sgi], 1);
DP_STATS_INC(peer, tx.bw[ts.bw], 1);
DP_STATS_UPD(peer, tx.last_ack_rssi, ts.ack_frame_rssi);
DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts.tid)]
, 1);
DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
DP_STATS_INCC(peer, tx.stbc, 1, ts.stbc);
DP_STATS_INCC(peer, tx.ofdma, 1, ts.ofdma);
DP_STATS_INCC(peer, tx.ldpc, 1, ts.ldpc);
DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1,
(ts.first_msdu && ts.last_msdu));
DP_STATS_INCC(peer, tx.amsdu_cnt, 1,
!(ts.first_msdu && ts.last_msdu));
DP_STATS_INCC(peer, tx.retries, 1, ts.transmit_cnt > 1);
}
}
/* TODO: This call is temporary.
* Stats update has to be attached to the HTT PPDU message
*/
if (soc->cdp_soc.ol_ops->update_dp_stats)
soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
&peer->stats, ts.peer_id, UPDATE_PEER_STATS);
out:
dp_aggregate_vdev_stats(tx_desc->vdev);
if (soc->cdp_soc.ol_ops->update_dp_stats)
soc->cdp_soc.ol_ops->update_dp_stats(vdev->pdev->osif_pdev,
&vdev->stats, vdev->vdev_id, UPDATE_VDEV_STATS);
fail:
return;
}
/**
* dp_tx_comp_process_desc() - Tx complete software descriptor handler
* @soc: core txrx main context
@@ -1292,12 +1418,14 @@ static void dp_tx_comp_process_desc(struct dp_soc *soc,
struct dp_tx_desc_s *next;
struct hal_tx_completion_status ts = {0};
uint32_t length;
struct dp_peer *peer;
desc = comp_head;
while (desc) {
hal_tx_comp_get_status(&desc->comp, &ts);
peer = dp_peer_find_by_id(soc, ts.peer_id);
length = qdf_nbuf_len(desc->nbuf);
/* Error Handling */
if (hal_tx_comp_get_buffer_source(&desc->comp) ==
@@ -1310,7 +1438,7 @@ static void dp_tx_comp_process_desc(struct dp_soc *soc,
/* Process Tx status in descriptor */
if (soc->process_tx_status ||
(desc->vdev && desc->vdev->mesh_vdev))
dp_tx_comp_process_tx_status(desc);
dp_tx_comp_process_tx_status(desc, length);
/* 0 : MSDU buffer, 1 : MLE */
if (desc->msdu_ext_desc) {
@@ -1335,13 +1463,6 @@ static void dp_tx_comp_process_desc(struct dp_soc *soc,
DP_TX_FREE_DMA_TO_DEVICE(soc, desc->vdev, desc->nbuf);
}
DP_STATS_INC_PKT(desc->pdev, tx.comp.comp_pkt, 1,
length);
DP_STATS_INCC(desc->pdev, tx.comp.mcs_count[MAX_MCS], 1,
ts.mcs >= MAX_MCS);
DP_STATS_INCC(desc->pdev, tx.comp.mcs_count[ts.mcs], 1,
ts.mcs <= MAX_MCS);
next = desc->next;
dp_tx_desc_release(desc, desc->pool_id);
desc = next;
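The long run of paired DP_STATS_INCC calls in dp_tx_comp_process_tx_status() implements a per-preamble MCS histogram with an overflow bin: each pkt_type has MAX_MCS + 1 buckets, and any reported MCS at or above that preamble's limit (MAX_MCS_11A/11B/11AC/...) lands in mcs_count[MAX_MCS]. The intent, as a compact sketch (split strictly here, whereas the paired conditions above both fire when the MCS equals the limit):

static void example_mcs_bucket(struct cdp_tx_stats *tx, uint32_t mcs,
			       enum cdp_packet_type pkt_type,
			       uint32_t max_mcs_for_preamble)
{
	if (mcs >= max_mcs_for_preamble)
		tx->pkt_type[pkt_type].mcs_count[MAX_MCS]++;	/* overflow bin */
	else
		tx->pkt_type[pkt_type].mcs_count[mcs]++;
}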

View File

@@ -39,13 +39,12 @@
#include "hal_rx.h"
#define MAX_TCL_RING 3
#define MAX_MCS 12 /* MCS rate varies from 0-11 */
#define SS_COUNT 8
#define MAX_RXDMA_ERRORS 32
#define SUPPORTED_BW 4
#define SUPPORTED_RECEPTION_TYPES 4
#define RECEPTION_TYPE_MU_MIMO 1
#define RECEPTION_TYPE_MU_OFDMA_MIMO 3
#define REPT_MU_MIMO 1
#define REPT_MU_OFDMA_MIMO 3
#define REO_ERROR_TYPE_MAX (HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET+1)
struct dp_soc_cmn;
struct dp_pdev;
@@ -268,116 +267,6 @@ struct reo_desc_list_node {
struct dp_rx_tid rx_tid;
};
/* TODO: Proper comments have been added in the subsequent gerrit */
/* packet info */
struct dp_pkt_info {
uint32_t num; /*no of packets*/
uint32_t bytes; /* total no of bytes */
};
/* per pdev tx stats*/
struct dp_tx_pdev_stats {
struct dp_pkt_info rcvd; /*total packets received for transmission */
struct {
/* Pkt Info for which completions were received */
struct dp_pkt_info comp_pkt;
uint32_t mcs_count[MAX_MCS + 1]; /* MCS Count */
} comp; /* Tx completions received*/
struct dp_pkt_info freed; /* Tx packets freed*/
struct dp_pkt_info processed; /* Tx packets processed*/
struct dp_pkt_info outstanding; /* Tx packets remaining for processing*/
struct {
struct dp_pkt_info dropped_pkt; /* Total packets dropped */
uint32_t desc_total; /* total descriptors dropped */
uint32_t dma_map_error; /* Dropped due to Dma Error */
uint32_t ring_full; /* dropped due to ring full */
uint32_t fw_discard; /* Discarded bu firmware */
uint32_t fw_discard_retired; /* fw_discard_retired */
/* firmware_discard_untransmitted */
uint32_t firmware_discard_untransmitted;
uint32_t mpdu_age_out; /* mpdu_age_out */
uint32_t firmware_discard_reason1; /*firmware_discard_reason1*/
uint32_t firmware_discard_reason2; /*firmware_discard_reason2*/
uint32_t firmware_discard_reason3; /*firmware_discard_reason3*/
} dropped; /* Packets dropped on the Tx side */
struct {
struct dp_pkt_info sg_pkt; /* total scatter gather packets */
uint32_t dropped_host; /* SG packets dropped by host */
uint32_t dropped_target; /* SG packets dropped by target */
} sg; /* Scatter Gather packet info */
struct {
uint32_t num_seg; /* No of segments in TSO packets */
struct dp_pkt_info tso_pkt; /* total no of TSO packets */
uint32_t dropped_host; /* TSO packets dropped by host */
uint32_t dropped_target; /* TSO packets dropped by target */
} tso; /* TSO packets info */
struct {
/* total no of multicast conversion packets */
struct dp_pkt_info mcast_pkt;
/* packets dropped due to map error */
uint32_t dropped_map_error;
/* packets dropped due to self Mac address */
uint32_t dropped_self_mac;
/* Packets dropped due to send fail */
uint32_t dropped_send_fail;
/* total unicast packets transmitted */
uint32_t ucast;
} mcast_en; /* Multicast Enhancement packets info */
/* Total packets passed Reinject handler */
struct dp_pkt_info reinject_pkts;
/* Total packets passed to inspect handler */
struct dp_pkt_info inspect_pkts;
/* Total Raw packets */
struct dp_pkt_info raw_pkt;
};
/* Per pdev RX stats */
struct dp_rx_pdev_stats {
struct dp_pkt_info rcvd_reo; /* packets received on the reo ring */
struct {
/* packets dropped because of no peer */
struct dp_pkt_info no_peer;
/* packets dropped because nsdu_done bit not set */
struct dp_pkt_info msdu_not_done;
} dropped; /* packets dropped on rx */
struct dp_pkt_info replenished; /* total packets replnished */
struct dp_pkt_info to_stack; /* total packets sent up the stack */
struct dp_pkt_info intra_bss; /* Intra BSS packets received */
struct dp_pkt_info wds; /* WDS packets received */
struct dp_pkt_info desc;
struct dp_pkt_info buff;
struct dp_pkt_info raw; /* Raw Pakets received */
struct {
uint32_t rxdma_unitialized; /* rxdma_unitialized errors */
uint32_t desc_alloc_fail; /* desc alloc failed errors */
} err; /* Rx errors */
uint32_t buf_freelist; /* buffers added back in freelist */
uint32_t mcs_count[MAX_MCS + 1]; /* packets in different MCS rates */
uint32_t sgi_count[MAX_MCS + 1]; /* SGI count */
/* Number of MSDUs with no MPDU level aggregation */
uint32_t non_ampdu_cnt;
/* Number of MSDUs part of AMSPU */
uint32_t ampdu_cnt;
/* Number of MSDUs with no MSDU level aggregation */
uint32_t non_amsdu_cnt;
/* Number of MSDUs part of AMSDU*/
uint32_t amsdu_cnt;
/* Packet count in spatiel Streams */
uint32_t nss[SS_COUNT];
/* Packet count in different Bandwidths */
uint32_t bw[SUPPORTED_BW];
/* reception type os packets */
uint32_t reception_type[SUPPORTED_RECEPTION_TYPES];
};
/* SOC level structure for data path */
struct dp_soc {
/* Common base structure - Should be the first member */
@@ -557,13 +446,16 @@ struct dp_soc {
/* SoC level data path statistics */
struct {
/* SOC level TX stats */
struct {
/* descriptors in each tcl ring */
uint32_t tcl_ring_full[MAX_TCL_RING];
/* Descriptors in use at soc */
uint32_t desc_in_use;
} tx; /* SOC level TX stats */
} tx;
/* SOC level RX stats */
struct {
/* Rx errors */
struct {
/* Invalid RBM error count */
uint32_t invalid_rbm;
@@ -576,12 +468,10 @@ struct dp_soc {
/* RX DMA error count */
uint32_t rxdma_error[MAX_RXDMA_ERRORS];
/* REO Error count */
uint32_t reo_error[
HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET+1];
} err; /* Rx eerors */
} rx; /* SOC level RX stats */
/* TBD */
} stats; /* TxRx SOC level stats */
uint32_t reo_error[REO_ERROR_TYPE_MAX];
} err;
} rx;
} stats;
/* Enable processing of Tx completion status words */
bool process_tx_status;
@@ -657,11 +547,7 @@ struct dp_pdev {
/* TBD */
/* PDEV level data path statistics */
struct {
struct dp_tx_pdev_stats tx; /* per pdev tx stats */
struct dp_rx_pdev_stats rx; /* per pdev rx stats */
/* TBD */
} stats;
struct cdp_pdev_stats stats;
/* Global RX decap mode for the device */
enum htt_pkt_type rx_decap_mode;
@@ -800,7 +686,8 @@ struct dp_vdev {
struct dp_tx_desc_pool_s *tx_desc;
struct dp_tx_ext_desc_pool_s *tx_ext_desc;
/* TBD */
/* VDEV Stats */
struct cdp_vdev_stats stats;
};
@@ -866,7 +753,8 @@ struct dp_peer {
qdf_time_t last_assoc_rcvd;
qdf_time_t last_disassoc_rcvd;
qdf_time_t last_deauth_rcvd;
/* TBD */
/* Peer Stats */
struct cdp_peer_stats stats;
};
#endif /* _DP_TYPES_H_ */

View File

@@ -552,6 +552,30 @@ hal_rx_attn_msdu_done_get(uint8_t *buf)
return msdu_done;
}
#define HAL_RX_ATTN_FIRST_MPDU_GET(_rx_attn) \
(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_attn, \
RX_ATTENTION_1_FIRST_MPDU_OFFSET)), \
RX_ATTENTION_1_FIRST_MPDU_MASK, \
RX_ATTENTION_1_FIRST_MPDU_LSB))
/*
* hal_rx_attn_first_mpdu_get(): get first_mpdu bit from rx attention
* @buf: pointer to rx_pkt_tlvs
*
* Return: uint32_t(first_mpdu)
*/
static inline uint32_t
hal_rx_attn_first_mpdu_get(uint8_t *buf)
{
struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
uint32_t first_mpdu;
first_mpdu = HAL_RX_ATTN_FIRST_MPDU_GET(rx_attn);
return first_mpdu;
}
/*
* Get peer_meta_data from RX_MPDU_INFO within RX_MPDU_START
*/
@@ -1232,6 +1256,58 @@ QDF_STATUS hal_rx_mpdu_get_addr2(uint8_t *buf, uint8_t *mac_addr)
* RX ERROR APIS
******************************************************************************/
#define HAL_RX_MPDU_END_DECRYPT_ERR_GET(_rx_mpdu_end) \
(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_end),\
RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_OFFSET)), \
RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_MASK, \
RX_MPDU_END_1_RX_IN_TX_DECRYPT_BYP_LSB))
/**
* hal_rx_mpdu_end_decrypt_err_get(): API to get the Decrypt ERR
* from rx_mpdu_end TLV
*
* @buf: pointer to the start of RX PKT TLV headers
* Return: uint32_t(decrypt_err)
*/
static inline uint32_t
hal_rx_mpdu_end_decrypt_err_get(uint8_t *buf)
{
struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
struct rx_mpdu_end *mpdu_end =
&pkt_tlvs->mpdu_end_tlv.rx_mpdu_end;
uint32_t decrypt_err;
decrypt_err = HAL_RX_MPDU_END_DECRYPT_ERR_GET(mpdu_end);
return decrypt_err;
}
#define HAL_RX_MPDU_END_MIC_ERR_GET(_rx_mpdu_end) \
(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_mpdu_end),\
RX_MPDU_END_1_TKIP_MIC_ERR_OFFSET)), \
RX_MPDU_END_1_TKIP_MIC_ERR_MASK, \
RX_MPDU_END_1_TKIP_MIC_ERR_LSB))
/**
* hal_rx_mpdu_end_mic_err_get(): API to get the MIC ERR
* from rx_mpdu_end TLV
*
* @buf: pointer to the start of RX PKT TLV headers
* Return: uint32_t(mic_err)
*/
static inline uint32_t
hal_rx_mpdu_end_mic_err_get(uint8_t *buf)
{
struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
struct rx_mpdu_end *mpdu_end =
&pkt_tlvs->mpdu_end_tlv.rx_mpdu_end;
uint32_t mic_err;
mic_err = HAL_RX_MPDU_END_MIC_ERR_GET(mpdu_end);
return mic_err;
}
/*******************************************************************************
* RX REO ERROR APIS
******************************************************************************/
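All of the new HAL getters follow the same pattern: locate the word inside the Rx packet TLVs, mask out the field, and shift it down to bit 0. A generic sketch of the assumed arithmetic behind _OFFSET_TO_WORD_PTR()/_HAL_MS(); the names below are illustrative, not the actual HAL macros:

static inline uint32_t example_get_tlv_field(uint8_t *tlv_base,
					     uint32_t word_offset_bytes,
					     uint32_t mask, uint32_t lsb)
{
	uint32_t word = *(uint32_t *)(tlv_base + word_offset_bytes);

	return (word & mask) >> lsb;
}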