qcacmn: Implement VOW stats for hawkeye

VOW (Video Over Wireless) stats extend the regular stats to
capture all drops per TID. Delay counters per TID are also
included.

Change-Id: If6087f37b32cf9ae4bb405190a358ad3c9750dd2
Author: Varsha Mishra, 2019-03-06 17:57:23 +05:30
Committed by: nshrivas
Commit: 18281794c8 (parent: 012467b128)
6 changed files with 302 additions and 36 deletions


@@ -820,6 +820,8 @@ struct cdp_soc_t {
  * @CDP_CONFIG_IGMPMLD_TID: Configurable TID value when igmpmld_override is set
  * @CDP_CONFIG_ARP_DBG_CONF: Enable ARP debug
  * @CDP_CONFIG_CAPTURE_LATENCY: Capture time latency
+ * @CDP_INGRESS_STATS: Accumulate ingress statistics
+ * @CDP_OSIF_DROP: Accumulate drops in OSIF layer
  */
 enum cdp_pdev_param_type {
     CDP_CONFIG_DEBUG_SNIFFER,
@@ -830,6 +832,8 @@ enum cdp_pdev_param_type {
     CDP_CONFIG_IGMPMLD_TID,
     CDP_CONFIG_ARP_DBG_CONF,
     CDP_CONFIG_CAPTURE_LATENCY,
+    CDP_INGRESS_STATS,
+    CDP_OSIF_DROP,
 };
 /*


@@ -106,6 +106,7 @@
  * HTT_PPDU_STATS_MAX_TAG declared in FW
  */
 #define CDP_PPDU_STATS_MAX_TAG 14
+#define CDP_MAX_DATA_TIDS 9
 
 #ifdef CONFIG_MCL
 #define CDP_WDI_NUM_EVENTS WDI_NUM_EVENTS
@@ -164,6 +165,141 @@ struct cdp_tidq_stats {
     uint32_t stats[TIDQ_STATS_MAX];
 };
 
+/*
+ * cdp_delay_stats_mode: Different types of delay statistics
+ *
+ * @CDP_DELAY_STATS_SW_ENQ: Stack to hw enqueue delay
+ * @CDP_DELAY_STATS_INTERFRAME: Interframe delay at radio entry point
+ * @CDP_DELAY_STATS_FW_HW_TRANSMIT: Hw enqueue to tx completion delay
+ * @CDP_DELAY_STATS_REAP_STACK: Delay from ring reap to indication to network stack
+ * @CDP_DELAY_STATS_RX_INTERFRAME: Rx interframe delay
+ * @CDP_DELAY_STATS_MODE_MAX: Maximum delay mode
+ */
+enum cdp_delay_stats_mode {
+    CDP_DELAY_STATS_SW_ENQ,
+    CDP_DELAY_STATS_INTERFRAME,
+    CDP_DELAY_STATS_FW_HW_TRANSMIT,
+    CDP_DELAY_STATS_REAP_STACK,
+    CDP_DELAY_STATS_RX_INTERFRAME,
+    CDP_DELAY_STATS_MODE_MAX,
+};
+
+/*
+ * cdp_delay_bucket_index
+ * Index to be used for all delay stats
+ */
+enum cdp_delay_bucket_index {
+    CDP_DELAY_BUCKET_1,
+    CDP_DELAY_BUCKET_2,
+    CDP_DELAY_BUCKET_3,
+    CDP_DELAY_BUCKET_4,
+    CDP_DELAY_BUCKET_5,
+    CDP_DELAY_BUCKET_6,
+    CDP_DELAY_BUCKET_7,
+    CDP_DELAY_BUCKET_8,
+    CDP_DELAY_BUCKET_9,
+    CDP_DELAY_BUCKET_10,
+    CDP_DELAY_BUCKET_11,
+    CDP_DELAY_BUCKET_12,
+    CDP_DELAY_BUCKET_MAX,
+};
+
+/*
+ * enum cdp_tx_sw_drop - tx packet drop reasons in the host sw path
+ */
+enum cdp_tx_sw_drop {
+    TX_DESC_ERR,
+    TX_HAL_RING_ACCESS_ERR,
+    TX_DMA_MAP_ERR,
+    TX_HW_ENQUEUE,
+    TX_SW_ENQUEUE,
+    TX_MAX_DROP,
+};
+
+/*
+ * enum cdp_rx_sw_drop - rx packet drop reasons in the host sw path
+ */
+enum cdp_rx_sw_drop {
+    INTRABSS_DROP,
+    MSDU_DONE_FAILURE,
+    INVALID_PEER_VDEV,
+    POLICY_CHECK_DROP,
+    MEC_DROP,
+    NAWDS_MCAST_DROP,
+    MESH_FILTER_DROP,
+    ENQUEUE_DROP,
+    RX_MAX_DROP,
+};
+
+/*
+ * struct cdp_delay_stats
+ * @delay_bucket: histogram of packet counts, bucketed by latency
+ * @min_delay: minimum delay
+ * @max_delay: maximum delay
+ * @avg_delay: average delay
+ */
+struct cdp_delay_stats {
+    uint64_t delay_bucket[CDP_DELAY_BUCKET_MAX];
+    uint32_t min_delay;
+    uint32_t max_delay;
+    uint32_t avg_delay;
+};
+
+/*
+ * struct cdp_tid_tx_stats
+ * @swq_delay: delay between wifi driver entry point and enqueue to HW in tx
+ * @hwtx_delay: delay between wifi driver exit (enqueue to HW) and tx completion
+ * @intfrm_delay: interframe delay
+ * @success_cnt: total successful transmit count
+ * @complete_cnt: total transmit count
+ * @comp_fail_cnt: transmit completions with a failure status (firmware
+ *                 drops found in the tx completion path)
+ * @swdrop_cnt: software drops in the tx path, per enum cdp_tx_sw_drop reason
+ */
+struct cdp_tid_tx_stats {
+    struct cdp_delay_stats swq_delay;
+    struct cdp_delay_stats hwtx_delay;
+    struct cdp_delay_stats intfrm_delay;
+    uint64_t success_cnt;
+    uint64_t complete_cnt;
+    uint64_t comp_fail_cnt;
+    uint64_t swdrop_cnt[TX_MAX_DROP];
+};
+
+/*
+ * struct cdp_tid_rx_stats
+ * @to_stack_delay: delay between ring reap and indication to network stack
+ * @intfrm_delay: interframe rx delay
+ * @delivered_to_stack: total packets indicated to stack
+ * @intrabss_cnt: rx total intra-BSS frames
+ * @msdu_cnt: number of msdus received from HW
+ * @mcast_msdu_cnt: number of mcast msdus received from HW in rx
+ * @bcast_msdu_cnt: number of bcast msdus received from HW in rx
+ * @fail_cnt: rx deliver drop counters, per enum cdp_rx_sw_drop reason
+ */
+struct cdp_tid_rx_stats {
+    struct cdp_delay_stats to_stack_delay;
+    struct cdp_delay_stats intfrm_delay;
+    uint64_t delivered_to_stack;
+    uint64_t intrabss_cnt;
+    uint64_t msdu_cnt;
+    uint64_t mcast_msdu_cnt;
+    uint64_t bcast_msdu_cnt;
+    uint64_t fail_cnt[RX_MAX_DROP];
+};
+
+/*
+ * struct cdp_tid_stats
+ * @ingress_stack: total packets received from the linux stack
+ * @osif_drop: total packets dropped in the OS interface (OSIF) layer
+ * @tid_tx_stats: transmit counters per tid
+ * @tid_rx_stats: receive counters per tid
+ */
+struct cdp_tid_stats {
+    uint64_t ingress_stack;
+    uint64_t osif_drop;
+    struct cdp_tid_tx_stats tid_tx_stats[CDP_MAX_DATA_TIDS];
+    struct cdp_tid_rx_stats tid_rx_stats[CDP_MAX_DATA_TIDS];
+};
+
 /* struct cdp_pkt_info - packet info
  * @num: no of packets
  * @bytes: total no of bytes
@@ -1065,6 +1201,7 @@ struct cdp_pdev_stats {
     /* Received wdi messages from fw */
     uint32_t wdi_event[CDP_WDI_NUM_EVENTS];
+    struct cdp_tid_stats tid_stats;
 };
 
 #ifndef BIG_ENDIAN_HOST

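The structures above define the histogram and min/max/avg fields, but the hunks shown here do not include the helper that fills them in the DP layer. A minimal sketch of the intended accounting, assuming for illustration fixed 10 ms buckets, a clamped top bucket, and a halving running average (the helper name is hypothetical):

/* Sketch: fold one measured delay (ms) into a cdp_delay_stats entry.
 * Bucket width is assumed; the driver derives real thresholds from the
 * cdp_delay_stats_mode being recorded.
 */
static void delay_stats_update(struct cdp_delay_stats *dstats, uint32_t delay)
{
    uint8_t idx = delay / 10;               /* assumed 10 ms wide buckets */

    if (idx >= CDP_DELAY_BUCKET_MAX)
        idx = CDP_DELAY_BUCKET_MAX - 1;     /* overflow goes to last bucket */
    dstats->delay_bucket[idx]++;

    if (!dstats->min_delay || delay < dstats->min_delay)
        dstats->min_delay = delay;
    if (delay > dstats->max_delay)
        dstats->max_delay = delay;
    /* cheap running average: halve toward each new sample */
    dstats->avg_delay = (dstats->avg_delay + delay) / 2;
}

The uint64_t buckets keep the histogram safe against wrap on long-running radios, while min/max/avg stay 32-bit since individual delays are small.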

@@ -7760,6 +7760,36 @@ dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
     return QDF_STATUS_SUCCESS;
 }
 
+/*
+ * dp_pdev_tid_stats_ingress_inc() - increment the ingress packet counter
+ * @pdev: pdev handle
+ * @val: amount to increment by
+ *
+ * Return: void
+ */
+static void
+dp_pdev_tid_stats_ingress_inc(struct cdp_pdev *pdev, uint32_t val)
+{
+    struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
+
+    dp_pdev->stats.tid_stats.ingress_stack += val;
+}
+
+/*
+ * dp_pdev_tid_stats_osif_drop() - accumulate drops reported by the OSIF layer
+ * @pdev: pdev handle
+ * @val: amount to increment by
+ *
+ * Return: void
+ */
+static void
+dp_pdev_tid_stats_osif_drop(struct cdp_pdev *pdev, uint32_t val)
+{
+    struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
+
+    dp_pdev->stats.tid_stats.osif_drop += val;
+}
+
 /*
  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
  * @pdev_handle: DP_PDEV handle
@@ -8006,6 +8036,12 @@ static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
         else
             pdev->latency_capture_enable = false;
         break;
+    case CDP_INGRESS_STATS:
+        dp_pdev_tid_stats_ingress_inc(pdev_handle, val);
+        break;
+    case CDP_OSIF_DROP:
+        dp_pdev_tid_stats_osif_drop(pdev_handle, val);
+        break;
     default:
         return QDF_STATUS_E_INVAL;
     }

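With the two new cases in dp_set_pdev_param(), the shim above the driver can batch per-netdev ingress and drop counts and push them down periodically. An illustrative caller (dp_set_pdev_param() is static to dp_main.c, so a real caller would reach it through the registered cdp ops table; the helper name is made up):

/* Hypothetical OSIF-side flush of batched counters into the per-pdev
 * TID stats. Calls the handler patched above directly for clarity;
 * production code would dispatch via the cdp ctrl ops instead.
 */
static void osif_flush_tid_counts(struct cdp_pdev *pdev,
                                  uint32_t ingress, uint32_t drops)
{
    if (ingress)
        dp_set_pdev_param(pdev, CDP_INGRESS_STATS, ingress);
    if (drops)
        dp_set_pdev_param(pdev, CDP_OSIF_DROP, drops);
}

Note the param "set" is really an accumulate here: both handlers add val to a running counter rather than storing it.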

@@ -428,6 +428,9 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
     struct dp_peer *da_peer;
     struct dp_ast_entry *ast_entry;
     qdf_nbuf_t nbuf_copy;
+    uint8_t tid = qdf_nbuf_get_priority(nbuf);
+    struct cdp_tid_rx_stats *tid_stats =
+        &ta_peer->vdev->pdev->stats.tid_stats.tid_rx_stats[tid];
 
     /* check if the destination peer is available in peer table
      * and also check if the source peer and destination peer
@@ -480,6 +483,7 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
                  * failed and we want to continue with
                  * next nbuf.
                  */
+                tid_stats->fail_cnt[INTRABSS_DROP]++;
                 return true;
             }
         }
@@ -490,7 +494,8 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
             return true;
         } else {
             DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
                              len);
+            tid_stats->fail_cnt[INTRABSS_DROP]++;
             return false;
         }
     }
@@ -513,9 +518,11 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
         if (dp_tx_send(ta_peer->vdev, nbuf_copy)) {
             DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
+            tid_stats->fail_cnt[INTRABSS_DROP]++;
             qdf_nbuf_free(nbuf_copy);
         } else {
             DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
+            tid_stats->intrabss_cnt++;
         }
     }
 
     /* return false as we have to still send the original pkt
@@ -1094,6 +1101,8 @@ static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
                                           qdf_nbuf_t nbuf_head,
                                           qdf_nbuf_t nbuf_tail)
 {
+    struct cdp_tid_rx_stats *stats = NULL;
+    uint8_t tid = 0;
+
     /*
      * highly unlikely to have a vdev without a registered rx
      * callback function. if so let us free the nbuf_list.
@@ -1103,6 +1112,10 @@ static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
         do {
             nbuf = nbuf_head;
             nbuf_head = nbuf_head->next;
+            tid = qdf_nbuf_get_priority(nbuf);
+            stats = &vdev->pdev->stats.tid_stats.tid_rx_stats[tid];
+            stats->fail_cnt[INVALID_PEER_VDEV]++;
+            stats->delivered_to_stack--;
             qdf_nbuf_free(nbuf);
         } while (nbuf_head);
@@ -1401,6 +1414,7 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
     uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
     uint8_t mac_id = 0;
     struct dp_pdev *pdev;
+    struct dp_pdev *rx_pdev;
     struct dp_srng *dp_rxdma_srng;
     struct rx_desc_pool *rx_desc_pool;
     struct dp_soc *soc = int_ctx->soc;
@@ -1412,6 +1426,7 @@ uint32_t dp_rx_process(struct dp_intr *int_ctx, void *hal_ring,
     qdf_nbuf_t deliver_list_tail = NULL;
     int32_t tid = 0;
     uint32_t dst_num_valid = 0;
+    struct cdp_tid_rx_stats *tid_stats;
 
     DP_HIST_INIT();
     /* Debug -- Remove later */
@@ -1593,19 +1608,34 @@ done:
     while (nbuf) {
         next = nbuf->next;
         rx_tlv_hdr = qdf_nbuf_data(nbuf);
+        /* Get TID from first msdu per MPDU, save to skb->priority */
+        if (qdf_nbuf_is_rx_chfrag_start(nbuf))
+            tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
+                                            rx_tlv_hdr);
+        DP_RX_TID_SAVE(nbuf, tid);
+
         /*
          * Check if DMA completed -- msdu_done is the last bit
          * to be written
          */
+        rx_pdev = soc->pdev_list[rx_desc->pool_id];
+        tid_stats = &rx_pdev->stats.tid_stats.tid_rx_stats[tid];
         if (qdf_unlikely(!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
             QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                       FL("MSDU DONE failure"));
             hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
                                  QDF_TRACE_LEVEL_INFO);
+            tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
             qdf_assert(0);
         }
+        tid_stats->msdu_cnt++;
+        if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
+            tid_stats->mcast_msdu_cnt++;
+            if (qdf_nbuf_is_bcast_pkt(nbuf))
+                tid_stats->bcast_msdu_cnt++;
+        }
+
         peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
         peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
         peer = dp_peer_find_by_id(soc, peer_id);
@@ -1632,12 +1662,14 @@ done:
         } else {
             DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                              qdf_nbuf_len(nbuf));
+            tid_stats->fail_cnt[INVALID_PEER_VDEV]++;
             qdf_nbuf_free(nbuf);
             nbuf = next;
             continue;
         }
 
         if (qdf_unlikely(vdev == NULL)) {
+            tid_stats->fail_cnt[INVALID_PEER_VDEV]++;
             qdf_nbuf_free(nbuf);
             nbuf = next;
             DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
@@ -1701,6 +1733,7 @@ done:
                 QDF_TRACE(QDF_MODULE_ID_DP,
                           QDF_TRACE_LEVEL_ERROR,
                           FL("Policy Check Drop pkt"));
+                tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
                 /* Drop & free packet */
                 qdf_nbuf_free(nbuf);
                 /* Statistics */
@@ -1713,6 +1746,7 @@ done:
             QDF_TRACE(QDF_MODULE_ID_DP,
                       QDF_TRACE_LEVEL_ERROR,
                       FL("received pkt with same src MAC"));
+            tid_stats->fail_cnt[MEC_DROP]++;
             DP_STATS_INC_PKT(peer, rx.mec_drop, 1, msdu_len);
 
             /* Drop & free packet */
@@ -1726,6 +1760,7 @@ done:
         if (qdf_unlikely(peer && (peer->nawds_enabled == true) &&
                          (hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr)) &&
                          (hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr) == false))) {
+            tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
             DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
             qdf_nbuf_free(nbuf);
             nbuf = next;
@@ -1750,14 +1785,14 @@ done:
         dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);
 
         if (qdf_unlikely(vdev->mesh_vdev)) {
-            if (dp_rx_filter_mesh_packets(vdev, nbuf,
-                                          rx_tlv_hdr)
+            if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
                     == QDF_STATUS_SUCCESS) {
                 QDF_TRACE(QDF_MODULE_ID_DP,
                           QDF_TRACE_LEVEL_INFO_MED,
                           FL("mesh pkt filtered"));
+                tid_stats->fail_cnt[MESH_FILTER_DROP]++;
                 DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
                              1);
 
                 qdf_nbuf_free(nbuf);
                 nbuf = next;
@@ -1812,6 +1847,7 @@ done:
                                     nbuf)) {
                 nbuf = next;
                 dp_peer_unref_del_find_by_id(peer);
+                tid_stats->intrabss_cnt++;
                 continue; /* Get next desc */
             }
         }
@@ -1819,18 +1855,13 @@ done:
         dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf);
 
         qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
 
-        /* Get TID from first msdu per MPDU, save to skb->priority */
-        if (qdf_nbuf_is_rx_chfrag_start(nbuf))
-            tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
-                                            rx_tlv_hdr);
-        DP_RX_TID_SAVE(nbuf, tid);
-
         DP_RX_LIST_APPEND(deliver_list_head,
                           deliver_list_tail,
                           nbuf);
         DP_STATS_INC_PKT(peer, rx.to_stack, 1,
                          qdf_nbuf_len(nbuf));
+        tid_stats->delivered_to_stack++;
         nbuf = next;
         dp_peer_unref_del_find_by_id(peer);
     }

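Every rx counter added above is indexed by the TID saved into skb->priority early in dp_rx_process(), so reading the stats back is a plain walk over the per-TID arrays. A minimal dump sketch (dp_print() stands in for whatever logging macro the consumer uses):

/* Illustrative dump of the per-TID rx counters accumulated above. */
static void dump_rx_tid_stats(struct dp_pdev *pdev)
{
    uint8_t tid, i;

    for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
        struct cdp_tid_rx_stats *rx =
            &pdev->stats.tid_stats.tid_rx_stats[tid];
        uint64_t drops = 0;

        /* total deliver-side drops across all rx drop reasons */
        for (i = 0; i < RX_MAX_DROP; i++)
            drops += rx->fail_cnt[i];
        dp_print("tid %u: msdu %llu to_stack %llu intrabss %llu drops %llu",
                 tid, rx->msdu_cnt, rx->delivered_to_stack,
                 rx->intrabss_cnt, drops);
    }
}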

@@ -1157,19 +1157,16 @@ static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
 }
 
 /**
- * dp_tx_classify_tid() - Obtain TID to be used for this frame
+ * dp_tx_get_tid() - Obtain TID to be used for this frame
  * @vdev: DP vdev handle
  * @nbuf: skb
  *
  * Extract the DSCP or PCP information from frame and map into TID value.
- * Software based TID classification is required when more than 2 DSCP-TID
- * mapping tables are needed.
- * Hardware supports 2 DSCP-TID mapping tables
  *
  * Return: void
  */
-static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
-                               struct dp_tx_msdu_info_s *msdu_info)
+static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
+                          struct dp_tx_msdu_info_s *msdu_info)
 {
     uint8_t tos = 0, dscp_tid_override = 0;
     uint8_t *hdr_ptr, *L3datap;
@@ -1181,14 +1178,6 @@ static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
     struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
 
     DP_TX_TID_OVERRIDE(msdu_info, nbuf);
 
-    if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
-        return;
-
-    /* for mesh packets don't do any classification */
-    if (qdf_unlikely(vdev->mesh_vdev))
-        return;
-
     if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
         eh = (qdf_ether_header_t *)nbuf->data;
         hdr_ptr = eh->ether_dhost;
@@ -1286,9 +1275,41 @@ static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
         tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
         msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
     }
+
+    if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
+        msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
+
     return;
 }
 
+/**
+ * dp_tx_classify_tid() - Obtain TID to be used for this frame
+ * @vdev: DP vdev handle
+ * @nbuf: skb
+ *
+ * Software based TID classification is required when more than 2 DSCP-TID
+ * mapping tables are needed.
+ * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
+ *
+ * Return: void
+ */
+static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
+                               struct dp_tx_msdu_info_s *msdu_info)
+{
+    struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
+
+    DP_TX_TID_OVERRIDE(msdu_info, nbuf);
+
+    if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
+        return;
+
+    /* for mesh packets don't do any classification */
+    if (qdf_unlikely(vdev->mesh_vdev))
+        return;
+
+    dp_tx_get_tid(vdev, nbuf, msdu_info);
+}
+
 #ifdef FEATURE_WLAN_TDLS
 /**
  * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
@@ -1364,6 +1385,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
     void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
     uint16_t htt_tcl_metadata = 0;
     uint8_t tid = msdu_info->tid;
+    struct cdp_tid_tx_stats *tid_stats = NULL;
 
     /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
     tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
@@ -1372,6 +1394,9 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                   "%s Tx_desc prepare Fail vdev %pK queue %d",
                   __func__, vdev, tx_q->desc_pool_id);
+        dp_tx_get_tid(vdev, nbuf, msdu_info);
+        tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
+        tid_stats->swdrop_cnt[TX_DESC_ERR]++;
         return nbuf;
     }
@@ -1389,6 +1414,9 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
         QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                   "%s %d : HAL RING Access Failed -- %pK",
                   __func__, __LINE__, hal_srng);
+        dp_tx_get_tid(vdev, nbuf, msdu_info);
+        tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
+        tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
         DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
         dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
         qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
@@ -1419,6 +1447,9 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                   "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
                   __func__, tx_desc, tx_q->ring_id);
+        dp_tx_get_tid(vdev, nbuf, msdu_info);
+        tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
+        tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
         dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
         qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
         goto fail_return;
@@ -1466,11 +1497,15 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
     struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
     void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
+    struct cdp_tid_tx_stats *tid_stats = NULL;
 
     if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
         QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                   "%s %d : HAL RING Access Failed -- %pK",
                   __func__, __LINE__, hal_srng);
+        dp_tx_get_tid(vdev, nbuf, msdu_info);
+        tid_stats = &pdev->stats.tid_stats.tid_tx_stats[msdu_info->tid];
+        tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
         DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
         return nbuf;
     }
@@ -1531,9 +1566,13 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
         if (status != QDF_STATUS_SUCCESS) {
             QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                       "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
                       __func__, tx_desc, tx_q->ring_id);
+            dp_tx_get_tid(vdev, nbuf, msdu_info);
+            tid_stats = &pdev->stats.tid_stats.
+                        tid_tx_stats[msdu_info->tid];
+            tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
 
             if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
                 dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
@@ -2640,10 +2679,16 @@ dp_tx_update_peer_stats(struct dp_peer *peer,
     struct dp_pdev *pdev = peer->vdev->pdev;
     struct dp_soc *soc = NULL;
     uint8_t mcs, pkt_type;
+    uint8_t tid = ts->tid;
+    struct cdp_tid_tx_stats *tid_stats;
 
     if (!pdev)
         return;
 
+    if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
+        tid = CDP_MAX_DATA_TIDS - 1;
+
+    tid_stats = &pdev->stats.tid_stats.tid_tx_stats[tid];
     soc = pdev->soc;
 
     mcs = ts->mcs;
@@ -2655,6 +2700,7 @@ dp_tx_update_peer_stats(struct dp_peer *peer,
     }
 
     DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
+    tid_stats->complete_cnt++;
 
     DP_STATS_INCC(peer, tx.dropped.age_out, 1,
                   (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
@@ -2677,8 +2723,12 @@ dp_tx_update_peer_stats(struct dp_peer *peer,
                   (ts->status == HAL_TX_TQM_RR_FW_REASON3));
 
     if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
+        tid_stats->comp_fail_cnt++;
         return;
     }
 
+    tid_stats->success_cnt++;
+
     DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
 
     DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
@@ -3077,6 +3127,7 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
     struct hal_tx_completion_status ts = {0};
     uint32_t *htt_desc = (uint32_t *)status;
     struct dp_peer *peer;
+    struct cdp_tid_tx_stats *tid_stats = NULL;
 
     qdf_assert(tx_desc->pdev);
@@ -3091,6 +3142,7 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
     case HTT_TX_FW2WBM_TX_STATUS_DROP:
     case HTT_TX_FW2WBM_TX_STATUS_TTL:
     {
+        uint8_t tid;
         if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
             ts.peer_id =
                 HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
@@ -3111,9 +3163,19 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
         ts.first_msdu = 1;
         ts.last_msdu = 1;
 
+        tid = ts.tid;
+        if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
+            tid = CDP_MAX_DATA_TIDS - 1;
+
+        tid_stats = &pdev->stats.tid_stats.tid_tx_stats[tid];
+        tid_stats->complete_cnt++;
-        if (tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)
+        if (qdf_unlikely(tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)) {
             ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
+            tid_stats->comp_fail_cnt++;
+        } else {
+            tid_stats->success_cnt++;
+        }
 
         peer = dp_peer_find_by_id(soc, ts.peer_id);

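Both completion paths keep the same invariant: each completion bumps complete_cnt and then exactly one of success_cnt or comp_fail_cnt, while enqueue-side failures land in swdrop_cnt[]. A consumer can use that to sanity-check a TID's tx counters — a hypothetical helper:

/* Check one TID's tx counters: completions must split exactly into
 * successes and failures; host-side drops are the sum of swdrop_cnt[].
 */
static bool tx_tid_stats_consistent(const struct cdp_tid_tx_stats *tx,
                                    uint64_t *host_drops)
{
    uint64_t drops = 0;
    int i;

    for (i = 0; i < TX_MAX_DROP; i++)
        drops += tx->swdrop_cnt[i];
    *host_drops = drops;

    return tx->complete_cnt == tx->success_cnt + tx->comp_fail_cnt;
}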

@@ -223,12 +223,8 @@ void dp_iterate_update_peer_list(void *pdev_hdl);
 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
 #endif
 
-#ifdef ATH_RX_PRI_SAVE
 #define DP_RX_TID_SAVE(_nbuf, _tid) \
     (qdf_nbuf_set_priority(_nbuf, _tid))
-#else
-#define DP_RX_TID_SAVE(_nbuf, _tid)
-#endif
 
 /* TODO TX_FEATURE_NOT_YET */
 static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
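
The ATH_RX_PRI_SAVE conditional goes away because the new rx counters depend on the priority DP_RX_TID_SAVE() stores: dp_rx_intrabss_fwd() and dp_rx_deliver_to_stack() read it back with qdf_nbuf_get_priority() to pick their stats slot, so the save must happen on every build. A sketch of that lookup (the clamp is illustrative; the rx hunks above index directly):

static struct cdp_tid_rx_stats *rx_tid_stats(struct dp_pdev *pdev,
                                             qdf_nbuf_t nbuf)
{
    /* priority was set via DP_RX_TID_SAVE() during ring reap */
    uint8_t tid = qdf_nbuf_get_priority(nbuf);

    /* defensive clamp, mirroring what the tx paths do */
    if (tid >= CDP_MAX_DATA_TIDS)
        tid = CDP_MAX_DATA_TIDS - 1;

    return &pdev->stats.tid_stats.tid_rx_stats[tid];
}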