qcacld-3.0: Fix rx_starvation on multiple-rx case

In case of parallel rx execution, fill_count was being updated
by multiple threads, causing fill_count to be incorrect. This left
the rx ring under-filled and, over time, led to rx starvation
(data stall).

Fix this by:
- making sure that we always fill the right number of buffers based
  on the msdu_count in the in_order_indication message.
- making sure that only one execution thread fills the ring
- making sure that the lock holder (filling the ring) does not
  run too long by introducing a max fill limit, in which case the
  filler will give up and the contender (the other rx execution
  thread) takes the lock and fills the ring itself.

CRs-Fixed: 1103851

Change-Id: I59ee2903d51877afc112a1a76b7a5529f1c6a5c0
这个提交包含在:
Orhan K AKYILDIZ
2016-12-19 20:51:47 -08:00
提交者 snandini
父节点 ba81c51032
当前提交 adf0610a93
修改 4 个文件,包含 76 行新增,5 行删除

查看文件

@@ -408,13 +408,17 @@ static void htt_rx_ring_refill_retry(void *arg)
} }
#endif #endif
static void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num) /* full_reorder_offload case: this function is called with lock held */
static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
{ {
int idx; int idx;
QDF_STATUS status; QDF_STATUS status;
struct htt_host_rx_desc_base *rx_desc; struct htt_host_rx_desc_base *rx_desc;
int filled = 0;
idx = *(pdev->rx_ring.alloc_idx.vaddr); idx = *(pdev->rx_ring.alloc_idx.vaddr);
moretofill:
while (num > 0) { while (num > 0) {
qdf_dma_addr_t paddr; qdf_dma_addr_t paddr;
qdf_nbuf_t rx_netbuf; qdf_nbuf_t rx_netbuf;
@@ -504,14 +508,21 @@ static void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
num--; num--;
idx++; idx++;
filled++;
idx &= pdev->rx_ring.size_mask; idx &= pdev->rx_ring.size_mask;
} }
if (qdf_atomic_read(&pdev->rx_ring.refill_debt) > 0) {
num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
/* Ideally the following gives 0, but sub is safer */
qdf_atomic_sub(num, &pdev->rx_ring.refill_debt);
goto moretofill;
}
fail: fail:
*(pdev->rx_ring.alloc_idx.vaddr) = idx; *(pdev->rx_ring.alloc_idx.vaddr) = idx;
htt_rx_dbg_rxbuf_indupd(pdev, idx); htt_rx_dbg_rxbuf_indupd(pdev, idx);
return; return filled;
} }
static inline unsigned htt_rx_ring_elems(struct htt_pdev_t *pdev) static inline unsigned htt_rx_ring_elems(struct htt_pdev_t *pdev)
@@ -583,6 +594,9 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
pdev->rx_ring.base_paddr, pdev->rx_ring.base_paddr,
qdf_get_dma_mem_context((&pdev->rx_ring.buf), qdf_get_dma_mem_context((&pdev->rx_ring.buf),
memctx)); memctx));
/* destroy the rx-parallelization refill spinlock */
qdf_spinlock_destroy(&(pdev->rx_ring.refill_lock));
} }
#endif #endif
@@ -2836,6 +2850,31 @@ void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt); qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
} }
/*
 * Maximum replenish "debt" a contending thread may defer to the current
 * lock holder before it must block on the lock and fill the ring itself.
 */
#define RX_RING_REFILL_DEBT_MAX 128

/**
 * htt_rx_msdu_buff_in_order_replenish() - replenish rx ring, full_reorder path
 * @pdev: HTT pdev whose rx ring is to be replenished
 * @num:  number of buffers to add (msdu_count from the in-order indication)
 *
 * Guarantees that only one thread fills the ring at a time (refill_lock).
 * A contender that fails the trylock records its quota in refill_debt for
 * the current lock holder to service; if the accumulated debt has already
 * reached RX_RING_REFILL_DEBT_MAX, the contender instead blocks on the
 * lock and fills its own part, so the holder does not run unboundedly long.
 *
 * Return: number of buffers actually filled by this thread (0 when the
 * work was deferred to the lock holder as debt).
 */
int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num)
{
	int filled = 0;

	if (!qdf_spin_trylock_bh(&(pdev->rx_ring.refill_lock))) {
		/* Another thread is filling: defer our quota as debt. */
		if (qdf_atomic_read(&pdev->rx_ring.refill_debt)
			 < RX_RING_REFILL_DEBT_MAX) {
			qdf_atomic_add(num, &pdev->rx_ring.refill_debt);
			return filled; /* 0 */
		}
		/*
		 * else:
		 * If we have quite a debt, then it is better for the lock
		 * holder to finish its work and then acquire the lock and
		 * fill our own part.
		 */
		qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));
	}
	/*
	 * NOTE(review): debt posted by a contender after the holder's last
	 * refill_debt check but before this unlock is not serviced until
	 * the next indication arrives — confirm that window is acceptable.
	 */
	filled = htt_rx_ring_fill_n(pdev, num);
	qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
	return filled;
}
#define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \ #define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \
(((_pream) << 6) | ((_nss) << 4) | (_rate)) (((_pream) << 6) | ((_nss) << 4) | (_rate))
@@ -3227,6 +3266,11 @@ int htt_rx_attach(struct htt_pdev_t *pdev)
qdf_atomic_init(&pdev->rx_ring.refill_ref_cnt); qdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt); qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
/* Initialize the refill_lock and debt (for rx-parallelization) */
qdf_spinlock_create(&(pdev->rx_ring.refill_lock));
qdf_atomic_init(&pdev->rx_ring.refill_debt);
/* Initialize the Rx refill retry timer */ /* Initialize the Rx refill retry timer */
qdf_timer_init(pdev->osdev, qdf_timer_init(pdev->osdev,
&pdev->rx_ring.refill_retry_timer, &pdev->rx_ring.refill_retry_timer,

查看文件

@@ -352,7 +352,9 @@ struct htt_pdev_t {
* variable is used to guarantee that only one thread tries * variable is used to guarantee that only one thread tries
* to replenish Rx ring. * to replenish Rx ring.
*/ */
qdf_atomic_t refill_ref_cnt; qdf_atomic_t refill_ref_cnt;
qdf_spinlock_t refill_lock;
qdf_atomic_t refill_debt;
#ifdef DEBUG_DMA_DONE #ifdef DEBUG_DMA_DONE
uint32_t dbg_initial_msdu_payld; uint32_t dbg_initial_msdu_payld;
uint32_t dbg_mpdu_range; uint32_t dbg_mpdu_range;

查看文件

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2011-2016 The Linux Foundation. All rights reserved. * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
* *
* Previously licensed under the ISC license by Qualcomm Atheros, Inc. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
* *
@@ -787,6 +787,23 @@ void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu);
*/ */
void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev); void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev);
/**
* @brief Add new MSDU buffers for the target to fill.
* @details
* This is full_reorder_offload version of the replenish function.
* In full_reorder, FW sends HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND
 * msg to host. It includes the number of MSDUs. This will be fed
* into htt_rx_msdu_buff_in_order_replenish function.
* The reason for creating yet another function is to avoid checks
* in real-time.
*
* @param pdev - the HTT instance the rx data will be received on
* @num - number of buffers to replenish
*
* Return: number of buffers actually replenished
*/
int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num);
/** /**
* @brief Links list of MSDUs into an single MPDU. Updates RX stats * @brief Links list of MSDUs into an single MPDU. Updates RX stats
* @details * @details

查看文件

@@ -1343,6 +1343,9 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
htt_pdev_handle htt_pdev = NULL; htt_pdev_handle htt_pdev = NULL;
int status; int status;
qdf_nbuf_t head_msdu, tail_msdu = NULL; qdf_nbuf_t head_msdu, tail_msdu = NULL;
uint8_t *rx_ind_data;
uint32_t *msg_word;
uint32_t msdu_count;
#ifdef WDI_EVENT_ENABLE #ifdef WDI_EVENT_ENABLE
uint8_t pktlog_bit; uint8_t pktlog_bit;
#endif #endif
@@ -1368,6 +1371,11 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
pktlog_bit = (htt_rx_amsdu_rx_in_order_get_pktlog(rx_ind_msg) == 0x01); pktlog_bit = (htt_rx_amsdu_rx_in_order_get_pktlog(rx_ind_msg) == 0x01);
#endif #endif
rx_ind_data = qdf_nbuf_data(rx_ind_msg);
msg_word = (uint32_t *)rx_ind_data;
/* Get the total number of MSDUs */
msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
/* /*
* Get a linked list of the MSDUs in the rx in order indication. * Get a linked list of the MSDUs in the rx in order indication.
* This also attaches each rx MSDU descriptor to the * This also attaches each rx MSDU descriptor to the
@@ -1382,7 +1390,7 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
/* Replenish the rx buffer ring first to provide buffers to the target /* Replenish the rx buffer ring first to provide buffers to the target
rather than waiting for the indeterminate time taken by the OS rather than waiting for the indeterminate time taken by the OS
to consume the rx frames */ to consume the rx frames */
htt_rx_msdu_buff_replenish(htt_pdev); htt_rx_msdu_buff_in_order_replenish(htt_pdev, msdu_count);
/* Send the chain of MSDUs to the OS */ /* Send the chain of MSDUs to the OS */
/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */ /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */