qca-wifi: Drop ppdu descriptors from the pending ppdu queue and other fixes

a. A ppdu descriptor in the pending ppdu queue is dropped once the queue
   length exceeds the threshold of 32 (see the first sketch below).
b. The ring map for the soc is configured only once, on the first enable
   and the last disable, rather than for every pdev toggle (see the
   second sketch below).

Change-Id: I7398a478ce9e4d974d9ecc2a06d30821f151a1b5
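
For illustration, here is a self-contained sketch of the drop policy in (a): a FIFO capped at a 32-entry threshold that delivers and frees its oldest element whenever an enqueue pushes it past the limit. All names here (pend_queue, pend_enqueue, MAX_PENDING_PPDUS) are illustrative, not the driver's; the authoritative change is the dp_check_ppdu_and_deliver() hunk below.

#include <stdio.h>
#include <stdlib.h>

#define MAX_PENDING_PPDUS 32	/* threshold named in the commit message */

struct pend_node {
	int ppdu_id;
	struct pend_node *next;
};

struct pend_queue {
	struct pend_node *head, *tail;
	unsigned int len;
	unsigned int dropped;	/* mirrors the new pend_ppdu_dropped stat */
};

static void pend_enqueue(struct pend_queue *q, int ppdu_id)
{
	struct pend_node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->ppdu_id = ppdu_id;
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
	q->len++;

	/* Over the threshold: flush the oldest descriptor to the
	 * consumer and free it instead of letting the backlog grow.
	 */
	if (q->len > MAX_PENDING_PPDUS) {
		struct pend_node *oldest = q->head;

		q->head = oldest->next;
		if (!q->head)
			q->tail = NULL;
		q->len--;
		printf("delivering and dropping ppdu %d\n", oldest->ppdu_id);
		free(oldest);
		q->dropped++;
	}
}

int main(void)
{
	struct pend_queue q = { 0 };
	int i;

	for (i = 0; i < 40; i++)
		pend_enqueue(&q, i);
	printf("len=%u dropped=%u\n", q.len, q.dropped);
	return 0;
}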
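Likewise, a minimal sketch of (b), assuming the intent shown in the diff: the soc-wide ring map is reprogrammed only when the first pdev enables tx capture and restored only when the last pdev disables it, so per-pdev toggles do not reconfigure the soc each time. The counting helper mirrors what dp_soc_is_tx_capture_set_in_pdev() does below; the rest is invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PDEV_CNT 3

static bool pdev_capture[MAX_PDEV_CNT];

/* Count pdevs with tx capture set, as dp_soc_is_tx_capture_set_in_pdev()
 * does in the diff below.
 */
static unsigned int capture_set_count(void)
{
	unsigned int i, n = 0;

	for (i = 0; i < MAX_PDEV_CNT; i++)
		if (pdev_capture[i])
			n++;
	return n;
}

static void enable_capture(unsigned int pdev_id)
{
	pdev_capture[pdev_id] = true;
	/* Only the 0 -> 1 transition reprograms the soc ring map. */
	if (capture_set_count() == 1)
		printf("soc: switch to single ring map\n");
}

static void disable_capture(unsigned int pdev_id)
{
	pdev_capture[pdev_id] = false;
	/* Only the 1 -> 0 transition restores the default ring map. */
	if (capture_set_count() == 0)
		printf("soc: restore default ring map\n");
}

int main(void)
{
	enable_capture(0);	/* reprograms the soc */
	enable_capture(1);	/* soc already configured: no-op */
	disable_capture(0);	/* capture still set on pdev 1: no-op */
	disable_capture(1);	/* last user gone: restore */
	return 0;
}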
Author: nobelj
Date:   2020-03-10 18:12:50 -07:00
Parent: 587c236f05
Commit: 9bd45e8e5b
2 changed files with 54 additions and 3 deletions

@@ -221,6 +221,8 @@ void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
 	ptr_tx_cap = &(pdev->tx_capture);
 	DP_PRINT_STATS("tx capture stats:");
+	DP_PRINT_STATS(" pending ppdu dropped: %u",
+		       ptr_tx_cap->pend_ppdu_dropped);
 	DP_PRINT_STATS(" mgmt control enqueue stats:");
 	for (i = 0; i < TXCAP_MAX_TYPE; i++) {
 		for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
@@ -657,6 +659,7 @@ void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev)
 	STAILQ_INIT(&pdev->tx_capture.ppdu_stats_queue);
 	STAILQ_INIT(&pdev->tx_capture.ppdu_stats_defer_queue);
 	qdf_spinlock_create(&pdev->tx_capture.ppdu_stats_lock);
+	qdf_spinlock_create(&pdev->tx_capture.config_lock);
 	pdev->tx_capture.ppdu_stats_queue_depth = 0;
 	pdev->tx_capture.ppdu_stats_next_sched = 0;
 	pdev->tx_capture.ppdu_stats_defer_queue_depth = 0;
@@ -698,6 +701,7 @@ void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev)
 	qdf_flush_workqueue(0, pdev->tx_capture.ppdu_stats_workqueue);
 	qdf_destroy_workqueue(0, pdev->tx_capture.ppdu_stats_workqueue);
+	qdf_spinlock_destroy(&pdev->tx_capture.config_lock);
 	qdf_spinlock_destroy(&pdev->tx_capture.ppdu_stats_lock);
 	STAILQ_FOREACH_SAFE(ppdu_info,
@@ -1087,6 +1091,33 @@ static void dp_iterate_free_peer_msdu_q(void *pdev_hdl)
 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 }
 
+/*
+ * dp_soc_is_tx_capture_set_in_pdev() - check how many pdevs on the soc
+ * have tx capture set
+ * @soc: DP_SOC handle
+ *
+ * Return: number of pdevs with tx capture enabled
+ */
+uint8_t
+dp_soc_is_tx_capture_set_in_pdev(struct dp_soc *soc)
+{
+	struct dp_pdev *pdev;
+	uint8_t pdev_tx_capture = 0;
+	uint8_t i;
+
+	for (i = 0; i < MAX_PDEV_CNT; i++) {
+		pdev = soc->pdev_list[i];
+		if (!pdev)
+			continue;
+
+		if (!pdev->tx_capture_enabled)
+			continue;
+
+		pdev_tx_capture++;
+	}
+
+	return pdev_tx_capture;
+}
+
 /*
  * dp_enh_tx_capture_disable()- API to disable enhanced tx capture
  * @pdev_handle: DP_PDEV handle
@@ -1098,10 +1129,13 @@ dp_enh_tx_capture_disable(struct dp_pdev *pdev)
 {
 	int i, j;
 
-	dp_soc_set_txrx_ring_map(pdev->soc);
+	if (!dp_soc_is_tx_capture_set_in_pdev(pdev->soc))
+		dp_soc_set_txrx_ring_map(pdev->soc);
+
 	dp_h2t_cfg_stats_msg_send(pdev,
 				  DP_PPDU_STATS_CFG_ENH_STATS,
 				  pdev->pdev_id);
+
 	dp_iterate_free_peer_msdu_q(pdev);
 	for (i = 0; i < TXCAP_MAX_TYPE; i++) {
 		for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
@@ -1160,7 +1194,9 @@ dp_config_enh_tx_capture(struct dp_pdev *pdev, uint8_t val)
 	if (pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS ||
 	    pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER) {
-		dp_soc_set_txrx_ring_map_single(pdev->soc);
+		if (dp_soc_is_tx_capture_set_in_pdev(pdev->soc) == 1)
+			dp_soc_set_txrx_ring_map_single(pdev->soc);
+
 		if (!pdev->pktlog_ppdu_stats)
 			dp_h2t_cfg_stats_msg_send(pdev,
 						  DP_PPDU_STATS_CFG_SNIFFER,
@@ -3204,6 +3240,7 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
 	uint32_t ppdu_id;
 	uint32_t desc_cnt;
 	qdf_nbuf_t tmp_nbuf;
+	struct cdp_tx_completion_ppdu *tmp_ppdu_desc;
 	struct dp_tx_tid *tx_tid = NULL;
 	int i;
@@ -3441,8 +3478,21 @@ dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
 			QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
 				  QDF_TRACE_LEVEL_FATAL,
 				  "pending ppdus (%d, %d) : %d\n",
-				  cur_ppdu_desc->user[0].peer_id,
+				  peer_id,
 				  tx_tid->tid, pending_ppdus);
+
+			tmp_nbuf =
+				qdf_nbuf_queue_remove(&tx_tid->pending_ppdu_q);
+			if (qdf_unlikely(!tmp_nbuf)) {
+				qdf_assert_always(0);
+				return;
+			}
+			tmp_ppdu_desc = (struct cdp_tx_completion_ppdu *)
+					qdf_nbuf_data(tmp_nbuf);
+			dp_send_data_to_stack(pdev, tmp_ppdu_desc);
+			dp_ppdu_queue_free(tmp_ppdu_desc);
+			qdf_nbuf_free(tmp_nbuf);
+			pdev->tx_capture.pend_ppdu_dropped++;
 		}
 	}
 }