qcacmn: Drop packets from RXDMA2SW ring on the non-monitoring MAC

In monitor mode, if the channel is initially configured in the
5 GHz band and is then moved to a channel in the 2.4 GHz band,
packets are still received on the MAC-0 RXDMA2SW ring since
IMPS is disabled for both MACs. Because the driver reaps only
the MAC-1 RXDMA2SW ring, the MAC-0 ring eventually hits the
ring-full condition, causing ring backpressure.

Fix this by dropping the packets received on the ring of the
non-monitoring MAC.

Change-Id: I49cb276827f4a06791183ee3fe8c88ba84df085d
CRs-Fixed: 2791341
Author: Yeshwanth Sriram Guntuka
Date: 2020-10-14 00:30:09 +05:30
Committed by: snandini
Parent: 1d687546c2
Commit: c5a9883e25
5 changed files with 315 additions and 23 deletions
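
In essence, every reap decision now keys off the band-to-lmac mapping. A
condensed sketch of the new policy, using the identifiers from the diff
below (budget accounting and error handling omitted; this is illustrative,
not the literal patch):

    uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
    uint32_t lmac_iter;

    /* Resolve the MAC that serves the configured monitor band, if any */
    if (pdev->mon_chan_band != REG_BAND_UNKNOWN)
            lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];

    /* Reap every RXDMA2SW ring: process the monitoring MAC, drop the rest */
    for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
            if (lmac_iter == lmac_id)
                    work_done = dp_mon_process(soc, int_ctx, lmac_iter,
                                               quota);
            else
                    work_done = dp_mon_drop_packets_for_mac(pdev, lmac_iter,
                                                            quota);
    }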

View File

@@ -229,6 +229,8 @@ enum {
  *  monitor status and monitor destination ring
  * @status_ppdu_drop: Number of ppdu dropped from monitor status ring
  * @dest_ppdu_drop: Number of ppdu dropped from monitor destination ring
+ * @mon_link_desc_invalid: msdu link desc invalid count
+ * @mon_rx_desc_invalid: rx_desc invalid count
  */
 struct cdp_pdev_mon_stats {
 #ifndef REMOVE_MON_DBG_STATS
@@ -257,5 +259,7 @@ struct cdp_pdev_mon_stats {
         uint32_t ppdu_id_match;
         uint32_t status_ppdu_drop;
         uint32_t dest_ppdu_drop;
+        uint32_t mon_link_desc_invalid;
+        uint32_t mon_rx_desc_invalid;
 };
 #endif
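
The two new counters are bumped on the dest-ring drop path added below.
A hypothetical debug dump (dump_mon_drop_stats is illustrative only and
not part of this change; DP_PRINT_STATS and pdev->rx_mon_stats are from
the existing code):

    /* Hypothetical helper: print the new drop-path counters */
    static void dump_mon_drop_stats(struct dp_pdev *pdev)
    {
            struct cdp_pdev_mon_stats *s = &pdev->rx_mon_stats;

            DP_PRINT_STATS("mon_link_desc_invalid = %u",
                           s->mon_link_desc_invalid);
            DP_PRINT_STATS("mon_rx_desc_invalid = %u",
                           s->mon_rx_desc_invalid);
    }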

View File

@@ -2061,8 +2061,10 @@ static void dp_interrupt_timer(void *arg)
         int budget = 0xffff, i;
         uint32_t remaining_quota = budget;
         uint64_t start_time;
-        uint32_t lmac_id;
-        uint8_t dp_intr_id;
+        uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
+        uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
+        uint32_t lmac_iter;
+        int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);

         /*
          * this logic makes all data path interfacing rings (UMAC/LMAC)
@@ -2083,32 +2085,36 @@
         if (!qdf_atomic_read(&soc->cmn_init_done))
                 return;

-        if (pdev->mon_chan_band == REG_BAND_UNKNOWN) {
-                qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
-                return;
-        }
-
-        lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
-        if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID)) {
-                qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
-                return;
-        }
-
-        dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
-        dp_srng_record_timer_entry(soc, dp_intr_id);
+        if (pdev->mon_chan_band != REG_BAND_UNKNOWN) {
+                lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
+                if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
+                        dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
+                        dp_srng_record_timer_entry(soc, dp_intr_id);
+                }
+        }

         start_time = qdf_get_log_timestamp();
         dp_is_hw_dbs_enable(soc, &max_mac_rings);

         while (yield == DP_TIMER_NO_YIELD) {
-                work_done = dp_mon_process(soc, &soc->intr_ctx[dp_intr_id],
-                                           lmac_id, remaining_quota);
-                if (work_done) {
-                        budget -= work_done;
-                        if (budget <= 0) {
-                                yield = DP_TIMER_WORK_EXHAUST;
-                                goto budget_done;
-                        }
-                        remaining_quota = budget;
-                        total_work_done += work_done;
-                }
+                for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
+                        if (lmac_iter == lmac_id)
+                                work_done = dp_mon_process(soc,
+                                                &soc->intr_ctx[dp_intr_id],
+                                                lmac_iter, remaining_quota);
+                        else
+                                work_done = dp_mon_drop_packets_for_mac(pdev,
+                                                lmac_iter,
+                                                remaining_quota);
+                        if (work_done) {
+                                budget -= work_done;
+                                if (budget <= 0) {
+                                        yield = DP_TIMER_WORK_EXHAUST;
+                                        goto budget_done;
+                                }
+                                remaining_quota = budget;
+                                total_work_done += work_done;
+                        }
+                }

                 yield = dp_should_timer_irq_yield(soc, total_work_done,
@@ -2123,7 +2129,8 @@ budget_done:
         else
                 qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

-        dp_srng_record_timer_exit(soc, dp_intr_id);
+        if (lmac_id != DP_MON_INVALID_LMAC_ID)
+                dp_srng_record_timer_exit(soc, dp_intr_id);
 }

 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
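
The budget bookkeeping is unchanged in spirit; it just runs once per ring
instead of once per pass. A worked sketch of one timer tick, assuming two
MAC rings with the monitor session on MAC-1 (reap_one_mac is a hypothetical
stand-in for the dp_mon_process/dp_mon_drop_packets_for_mac pair above):

    int budget = 0xffff;
    uint32_t remaining_quota = budget;
    uint32_t lmac_iter, work_done, total_work_done = 0;

    for (lmac_iter = 0; lmac_iter < 2; lmac_iter++) {
            /* iter 0 drops on the idle MAC, iter 1 processes the monitor MAC */
            work_done = reap_one_mac(lmac_iter, remaining_quota);
            if (work_done) {
                    budget -= work_done;
                    if (budget <= 0)
                            break;  /* DP_TIMER_WORK_EXHAUST in the diff */
                    remaining_quota = budget;
                    total_work_done += work_done;
            }
    }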

View File

@@ -26,6 +26,13 @@
  */
 #define MON_BUF_MIN_ENTRIES 64

+/*
+ * The below macro defines the maximum number of ring entries that would
+ * be processed in a single instance when reaping each of the
+ * non-monitoring RXDMA2SW rings.
+ */
+#define MON_DROP_REAP_LIMIT 64
+
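
Both drop routines below bound a single reap pass with this limit; the
loop guard, simplified from dp_mon_dest_srng_drop_for_mac further down:

    while ((desc = hal_srng_dst_peek(hal_soc, mon_dst_srng)) &&
           reap_cnt < MON_DROP_REAP_LIMIT) {
            /* ... drop the entry's buffers ... */
            reap_cnt++;
            hal_srng_dst_get_next(hal_soc, mon_dst_srng);
    }
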
 /*
  * The maximum headroom reserved for monitor destination buffer to
  * accommodate radiotap header and protocol flow tag
@@ -235,6 +242,20 @@ QDF_STATUS dp_mon_link_free(struct dp_pdev *pdev);
  */
 uint32_t dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
                         uint32_t mac_id, uint32_t quota);

+/**
+ * dp_mon_drop_packets_for_mac() - Drop the mon status ring and
+ *  dest ring packets for a given mac. Packets in the status ring
+ *  and the dest ring are dropped independently.
+ * @pdev: DP pdev
+ * @mac_id: mac id
+ * @quota: max number of status ring entries that can be processed
+ *
+ * Return: work done (status ring entries reaped)
+ */
+uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
+                                     uint32_t quota);
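
As wired into dp_interrupt_timer above, only the status-ring reap count is
returned and charged against the caller's budget; the dest-ring drop happens
as a side effect. Minimal call-site sketch (variables as in the timer loop):

    if (lmac_iter != lmac_id)
            work_done = dp_mon_drop_packets_for_mac(pdev, lmac_iter,
                                                    remaining_quota);
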
 QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
                              qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu);
 /*
@@ -1093,5 +1114,18 @@ dp_rx_mon_init_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info,
 {
 }
 #endif

+#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
+/**
+ * dp_mon_dest_srng_drop_for_mac() - Drop the mon dest ring packets for
+ *  a given mac
+ * @pdev: DP pdev
+ * @mac_id: mac id
+ *
+ * Return: Number of ring entries reaped
+ */
+uint32_t
+dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id);
+#endif
 #endif

View File

@@ -2045,3 +2045,129 @@ dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev)
         return status;
 }

+#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
+uint32_t
+dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
+{
+        struct dp_soc *soc = pdev->soc;
+        hal_rxdma_desc_t rxdma_dst_ring_desc;
+        hal_soc_handle_t hal_soc;
+        void *mon_dst_srng;
+        union dp_rx_desc_list_elem_t *head = NULL;
+        union dp_rx_desc_list_elem_t *tail = NULL;
+        uint32_t rx_bufs_used = 0;
+        void *rx_msdu_link_desc;
+        uint32_t msdu_count = 0;
+        uint16_t num_msdus;
+        struct hal_buf_info buf_info;
+        struct hal_rx_msdu_list msdu_list;
+        qdf_nbuf_t nbuf;
+        uint32_t i;
+        uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
+        uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
+        struct rx_desc_pool *rx_desc_pool;
+        uint32_t reap_cnt = 0;
+
+        if (qdf_unlikely(!soc || !soc->hal_soc))
+                return reap_cnt;
+
+        mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);
+        if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
+                return reap_cnt;
+
+        hal_soc = soc->hal_soc;
+
+        qdf_spin_lock_bh(&pdev->mon_lock);
+        if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
+                qdf_spin_unlock_bh(&pdev->mon_lock);
+                return reap_cnt;
+        }
+
+        rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
+
+        while ((rxdma_dst_ring_desc =
+                hal_srng_dst_peek(hal_soc, mon_dst_srng)) &&
+                reap_cnt < MON_DROP_REAP_LIMIT) {
+                msdu_count = 0;
+
+                do {
+                        hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc,
+                                                     &buf_info, &msdu_count);
+                        rx_msdu_link_desc = dp_rx_cookie_2_mon_link_desc(pdev,
+                                                          buf_info, mac_id);
+                        if (qdf_unlikely(!rx_msdu_link_desc)) {
+                                pdev->rx_mon_stats.mon_link_desc_invalid++;
+                                goto next_entry;
+                        }
+
+                        hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
+                                             &msdu_list, &num_msdus);
+
+                        for (i = 0; i < num_msdus; i++) {
+                                struct dp_rx_desc *rx_desc;
+
+                                rx_desc = dp_rx_get_mon_desc(soc,
+                                                msdu_list.sw_cookie[i]);
+                                if (qdf_unlikely(!rx_desc)) {
+                                        pdev->rx_mon_stats.
+                                                mon_rx_desc_invalid++;
+                                        continue;
+                                }
+
+                                nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
+                                rx_bufs_used++;
+
+                                if (!rx_desc->unmapped) {
+                                        dp_rx_mon_buffer_unmap(soc, rx_desc,
+                                                rx_desc_pool->buf_size);
+                                        rx_desc->unmapped = 1;
+                                }
+
+                                qdf_nbuf_free(nbuf);
+                                dp_rx_add_to_free_desc_list(&head, &tail,
+                                                            rx_desc);
+                        }
+
+                        /*
+                         * Store the current link buffer into the local
+                         * structure to be used for release purpose.
+                         */
+                        hal_rxdma_buff_addr_info_set(rx_link_buf_info,
+                                                     buf_info.paddr,
+                                                     buf_info.sw_cookie,
+                                                     buf_info.rbm);
+
+                        hal_rx_mon_next_link_desc_get(rx_msdu_link_desc,
+                                                      &buf_info);
+                        if (dp_rx_monitor_link_desc_return(pdev,
+                                                (hal_buff_addrinfo_t)
+                                                rx_link_buf_info,
+                                                mac_id, bm_action) !=
+                            QDF_STATUS_SUCCESS)
+                                dp_info_rl("monitor link desc return failed");
+                } while (buf_info.paddr && msdu_count);
+
+next_entry:
+                reap_cnt++;
+                rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
+                                                            mon_dst_srng);
+        }
+
+        hal_srng_access_end(hal_soc, mon_dst_srng);
+        qdf_spin_unlock_bh(&pdev->mon_lock);
+
+        /* Replenish every buffer freed above back to the monitor buf ring */
+        if (rx_bufs_used) {
+                dp_rx_buffers_replenish(soc, mac_id,
+                                        dp_rxdma_get_mon_buf_ring(pdev,
+                                                                  mac_id),
+                                        rx_desc_pool,
+                                        rx_bufs_used, &head, &tail);
+        }
+
+        return reap_cnt;
+}
+#endif

View File

@@ -2416,3 +2416,124 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
         return QDF_STATUS_SUCCESS;
 }

+#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
+/**
+ * dp_mon_status_srng_drop_for_mac() - Drop the mon status ring packets for
+ *  a given mac
+ * @pdev: DP pdev
+ * @mac_id: mac id
+ * @quota: maximum number of ring entries that can be processed
+ *
+ * Return: Number of ring entries reaped
+ */
+static uint32_t
+dp_mon_status_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
+                                uint32_t quota)
+{
+        struct dp_soc *soc = pdev->soc;
+        void *mon_status_srng;
+        hal_soc_handle_t hal_soc;
+        void *ring_desc;
+        uint32_t reap_cnt = 0;
+
+        if (qdf_unlikely(!soc || !soc->hal_soc))
+                return reap_cnt;
+
+        mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;
+        if (qdf_unlikely(!mon_status_srng ||
+                         !hal_srng_initialized(mon_status_srng)))
+                return reap_cnt;
+
+        hal_soc = soc->hal_soc;
+
+        if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
+                return reap_cnt;
+
+        while ((ring_desc =
+                hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng)) &&
+                reap_cnt < MON_DROP_REAP_LIMIT && quota--) {
+                uint64_t buf_addr;
+
+                buf_addr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_desc) |
+                            ((uint64_t)
+                             (HAL_RX_BUFFER_ADDR_39_32_GET(ring_desc)) << 32));
+
+                /*
+                 * An entry with no buffer attached gets a fresh buffer from
+                 * the monitor descriptor pool before it is stepped past.
+                 */
+                if (qdf_unlikely(!buf_addr)) {
+                        struct rx_desc_pool *rx_desc_pool;
+                        qdf_dma_addr_t iova;
+                        qdf_nbuf_t status_nbuf;
+                        struct dp_rx_desc *rx_desc;
+                        union dp_rx_desc_list_elem_t *rx_desc_elem;
+
+                        rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id,
+                                                               pdev->pdev_id);
+
+                        qdf_spin_lock_bh(&rx_desc_pool->lock);
+                        if (!rx_desc_pool->freelist) {
+                                qdf_spin_unlock_bh(&rx_desc_pool->lock);
+                                break;
+                        }
+                        rx_desc_elem = rx_desc_pool->freelist;
+                        rx_desc_pool->freelist = rx_desc_pool->freelist->next;
+                        qdf_spin_unlock_bh(&rx_desc_pool->lock);
+
+                        rx_desc = &rx_desc_elem->rx_desc;
+
+                        status_nbuf = dp_rx_nbuf_prepare(soc, pdev);
+                        if (qdf_unlikely(!status_nbuf)) {
+                                union dp_rx_desc_list_elem_t *desc_list = NULL;
+                                union dp_rx_desc_list_elem_t *tail = NULL;
+
+                                dp_info_rl("fail to allocate or map nbuf");
+                                dp_rx_add_to_free_desc_list(&desc_list, &tail,
+                                                            rx_desc);
+                                dp_rx_add_desc_list_to_free_list(soc,
+                                                                 &desc_list,
+                                                                 &tail, mac_id,
+                                                                 rx_desc_pool);
+
+                                hal_rxdma_buff_addr_info_set(ring_desc, 0, 0,
+                                                HAL_RX_BUF_RBM_SW3_BM);
+                                break;
+                        }
+
+                        iova = qdf_nbuf_get_frag_paddr(status_nbuf, 0);
+                        rx_desc->nbuf = status_nbuf;
+                        rx_desc->in_use = 1;
+
+                        hal_rxdma_buff_addr_info_set(ring_desc, iova,
+                                                     rx_desc->cookie,
+                                                     HAL_RX_BUF_RBM_SW3_BM);
+                }
+
+                reap_cnt++;
+                hal_srng_src_get_next(hal_soc, mon_status_srng);
+        }
+
+        hal_srng_access_end(hal_soc, mon_status_srng);
+
+        return reap_cnt;
+}
+
+uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
+                                     uint32_t quota)
+{
+        uint32_t work_done;
+
+        work_done = dp_mon_status_srng_drop_for_mac(pdev, mac_id, quota);
+        dp_mon_dest_srng_drop_for_mac(pdev, mac_id);
+
+        return work_done;
+}
+#else
+uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
+                                     uint32_t quota)
+{
+        return 0;
+}
+#endif