qcacld-3.0: Enhance htt_rx_ring_fill_n processing

Enhance htt_rx_ring_fill_n by:
- Starting the refill retry timer when the function cannot allocate
  memory to fill rx buffers.
- Handling the case where one stream can starve the other by
  constantly adding to the refill debt (a sketch of this scheme
  follows the commit metadata below).

CRs-Fixed: 2001325
Change-Id: Idff33d9c28daaf7a00334134459a3633f5ccde7c
Author: Mohit Khanna
Date: 2017-01-31 21:07:12 -08:00
Committed by: snandini
Parent: 4900e1d357
Commit: c68622e61b
4 changed files with 116 additions and 76 deletions
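At the heart of the change is a refill-debt scheme: a caller that cannot take the refill lock records the number of buffers it owes as debt, and the lock holder or the retry timer serves that debt later; when allocation fails, the remainder goes back on the debt and the timer re-arms. Below is a minimal compilable sketch of that scheme, assuming C11 atomics and hypothetical names (rx_ring, ring_fill_n, replenish, refill_retry) in place of the driver's actual API:

/*
 * Minimal model of the refill-debt scheme; all names and types here
 * are illustrative, not the driver's actual API.
 */
#include <stdatomic.h>
#include <stdio.h>

struct rx_ring {
	atomic_int refill_debt; /* rx buffers owed to the ring */
	int free_bufs;          /* stand-in for allocatable memory */
};

/* Post up to 'num' buffers; may fall short when allocation fails. */
static int ring_fill_n(struct rx_ring *r, int num)
{
	int filled = num < r->free_bufs ? num : r->free_bufs;

	r->free_bufs -= filled;
	return filled;
}

/*
 * Caller path: if the refill lock is busy, record the request as debt
 * and return; the lock holder or the retry timer serves it later, so
 * no caller ever blocks on the refill.
 */
static int replenish(struct rx_ring *r, int num, int lock_busy)
{
	if (lock_busy) {
		atomic_fetch_add(&r->refill_debt, num);
		return 0;
	}
	return ring_fill_n(r, num);
}

/* Timer path: claim the outstanding debt, then try to fill the ring. */
static void refill_retry(struct rx_ring *r)
{
	int num = atomic_load(&r->refill_debt);
	int filled;

	/* subtract rather than store 0: new debt may arrive meanwhile */
	atomic_fetch_sub(&r->refill_debt, num);
	filled = ring_fill_n(r, num);
	if (filled < num) {
		/* allocation fell short: put the remainder back on the
		 * debt and re-arm the timer (modeled here as a print) */
		atomic_fetch_add(&r->refill_debt, num - filled);
		printf("short %d buffers, retry later\n", num - filled);
	}
}

int main(void)
{
	struct rx_ring r = { .free_bufs = 3 };

	atomic_init(&r.refill_debt, 0);
	replenish(&r, 5, 1); /* lock busy: 5 buffers become debt */
	refill_retry(&r);    /* serves 3, leaves 2 on the debt */
	printf("debt %d\n", atomic_load(&r.refill_debt)); /* 2 */
	return 0;
}

Note the subtract-rather-than-zero pattern when claiming debt; the diff below keeps the same invariant with qdf_atomic_sub.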


@@ -663,7 +663,7 @@ void htt_rx_dbg_rxbuf_init(struct htt_pdev_t *pdev)
 		pdev->rx_buff_recvd_err = 0;
 		pdev->refill_retry_timer_starts = 0;
 		pdev->refill_retry_timer_calls = 0;
+		pdev->refill_retry_timer_doubles = 0;
 	}
 }
@@ -687,16 +687,21 @@ static inline int htt_display_rx_buf_debug(struct htt_pdev_t *pdev)
 			  buf[i].posted,
 			  buf[i].recved);
 		}
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "rxbuf_idx %d all_posted: %d all_recvd: %d recv_err: %d timer_starts :%d timer_calls :%d",
+			  "rxbuf_idx %d all_posted: %d all_recvd: %d recv_err: %d",
 			  pdev->rx_buff_index,
 			  pdev->rx_buff_posted_cum,
 			  pdev->rx_buff_recvd_cum,
-			  pdev->rx_buff_recvd_err,
+			  pdev->rx_buff_recvd_err);
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "timer kicks :%d actual :%d restarts:%d debtors: %d fill_n: %d",
 			  pdev->refill_retry_timer_starts,
-			  pdev->refill_retry_timer_calls);
+			  pdev->refill_retry_timer_calls,
+			  pdev->refill_retry_timer_doubles,
+			  pdev->rx_buff_debt_invoked,
+			  pdev->rx_buff_fill_n_invoked);
 	} else
 		return -EINVAL;
 
 	return 0;


@@ -413,73 +413,6 @@ htt_rx_in_ord_paddr_get(uint32_t *u32p)
 #endif /* HELIUMPLUS_PADDR64 */
 #endif /* CONFIG_HL_SUPPORT*/
-
-#ifndef CONFIG_HL_SUPPORT
-static int htt_rx_ring_size(struct htt_pdev_t *pdev)
-{
-	int size;
-
-	/*
-	 * It is expected that the host CPU will typically be able to service
-	 * the rx indication from one A-MPDU before the rx indication from
-	 * the subsequent A-MPDU happens, roughly 1-2 ms later.
-	 * However, the rx ring should be sized very conservatively, to
-	 * accomodate the worst reasonable delay before the host CPU services
-	 * a rx indication interrupt.
-	 * The rx ring need not be kept full of empty buffers. In theory,
-	 * the htt host SW can dynamically track the low-water mark in the
-	 * rx ring, and dynamically adjust the level to which the rx ring
-	 * is filled with empty buffers, to dynamically meet the desired
-	 * low-water mark.
-	 * In contrast, it's difficult to resize the rx ring itself, once
-	 * it's in use.
-	 * Thus, the ring itself should be sized very conservatively, while
-	 * the degree to which the ring is filled with empty buffers should
-	 * be sized moderately conservatively.
-	 */
-	size =
-		ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
-		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
-		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
-
-	if (size < HTT_RX_RING_SIZE_MIN)
-		size = HTT_RX_RING_SIZE_MIN;
-	else if (size > HTT_RX_RING_SIZE_MAX)
-		size = HTT_RX_RING_SIZE_MAX;
-
-	size = qdf_get_pwr2(size);
-	return size;
-}
-
-static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
-{
-	int size;
-
-	size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
-		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
-		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
-
-	size = qdf_get_pwr2(size);
-	/*
-	 * Make sure the fill level is at least 1 less than the ring size.
-	 * Leaving 1 element empty allows the SW to easily distinguish
-	 * between a full ring vs. an empty ring.
-	 */
-	if (size >= pdev->rx_ring.size)
-		size = pdev->rx_ring.size - 1;
-
-	return size;
-}
-
-static void htt_rx_ring_refill_retry(void *arg)
-{
-	htt_pdev_handle pdev = (htt_pdev_handle) arg;
-
-	pdev->refill_retry_timer_calls++;
-	htt_rx_msdu_buff_replenish(pdev);
-}
-#endif
 
 /* full_reorder_offload case: this function is called with lock held */
 static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 {
@@ -487,6 +420,7 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 	QDF_STATUS status;
 	struct htt_host_rx_desc_base *rx_desc;
 	int filled = 0;
+	int debt_served = 0;
 
 	idx = *(pdev->rx_ring.alloc_idx.vaddr);
@@ -582,10 +516,9 @@ moretofill:
 		filled++;
 		idx &= pdev->rx_ring.size_mask;
 	}
 
-	if (qdf_atomic_read(&pdev->rx_ring.refill_debt) > 0) {
+	if (debt_served < qdf_atomic_read(&pdev->rx_ring.refill_debt)) {
 		num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
-		/* Ideally the following gives 0, but sub is safer */
-		qdf_atomic_sub(num, &pdev->rx_ring.refill_debt);
+		debt_served += num;
 		goto moretofill;
 	}
@@ -596,6 +529,94 @@ fail:
 	return filled;
 }
+
+#ifndef CONFIG_HL_SUPPORT
+static int htt_rx_ring_size(struct htt_pdev_t *pdev)
+{
+	int size;
+
+	/*
+	 * It is expected that the host CPU will typically be able to service
+	 * the rx indication from one A-MPDU before the rx indication from
+	 * the subsequent A-MPDU happens, roughly 1-2 ms later.
+	 * However, the rx ring should be sized very conservatively, to
+	 * accommodate the worst reasonable delay before the host CPU services
+	 * a rx indication interrupt.
+	 * The rx ring need not be kept full of empty buffers. In theory,
+	 * the htt host SW can dynamically track the low-water mark in the
+	 * rx ring, and dynamically adjust the level to which the rx ring
+	 * is filled with empty buffers, to dynamically meet the desired
+	 * low-water mark.
+	 * In contrast, it's difficult to resize the rx ring itself, once
+	 * it's in use.
+	 * Thus, the ring itself should be sized very conservatively, while
+	 * the degree to which the ring is filled with empty buffers should
+	 * be sized moderately conservatively.
+	 */
+	size =
+		ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
+		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
+		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
+
+	if (size < HTT_RX_RING_SIZE_MIN)
+		size = HTT_RX_RING_SIZE_MIN;
+	else if (size > HTT_RX_RING_SIZE_MAX)
+		size = HTT_RX_RING_SIZE_MAX;
+
+	size = qdf_get_pwr2(size);
+	return size;
+}
+
+static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
+{
+	int size;
+
+	size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
+		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
+		(8 * HTT_RX_AVG_FRM_BYTES) *
+		HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+
+	size = qdf_get_pwr2(size);
+	/*
+	 * Make sure the fill level is at least 1 less than the ring size.
+	 * Leaving 1 element empty allows the SW to easily distinguish
+	 * between a full ring vs. an empty ring.
+	 */
+	if (size >= pdev->rx_ring.size)
+		size = pdev->rx_ring.size - 1;
+
+	return size;
+}
+
+static void htt_rx_ring_refill_retry(void *arg)
+{
+	htt_pdev_handle pdev = (htt_pdev_handle) arg;
+	int filled = 0;
+	int num;
+
+	pdev->refill_retry_timer_calls++;
+	qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));
+
+	num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
+	qdf_atomic_sub(num, &pdev->rx_ring.refill_debt);
+	filled = htt_rx_ring_fill_n(pdev, num);
+
+	qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
+
+	if (filled > num) {
+		/* we served ourselves and some other debt */
+		/* sub is safer than = 0 */
+		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
+	} else if (num == filled) { /* nothing to be done */
+	} else {
+		/* we could not fill all, timer must have been started */
+		pdev->refill_retry_timer_doubles++;
+	}
+}
+#endif
 
 static inline unsigned htt_rx_ring_elems(struct htt_pdev_t *pdev)
 {
 	return
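To make the sizing arithmetic in htt_rx_ring_size and htt_rx_ring_fill_level concrete, here is a standalone worked example; the throughput figure, the macro values, and the pwr2 helper are illustrative assumptions, not values taken from the driver headers:

/*
 * Worked example of the ring-size arithmetic; all values below are
 * illustrative assumptions, not taken from the driver headers.
 */
#include <stdio.h>

#define HTT_RX_AVG_FRM_BYTES       1000  /* assumed */
#define HTT_RX_HOST_LATENCY_MAX_MS 20    /* assumed */
#define HTT_RX_RING_SIZE_MIN       128   /* assumed */
#define HTT_RX_RING_SIZE_MAX       2048  /* assumed */

/* Round up to the next power of two (stand-in for qdf_get_pwr2). */
static int pwr2(int n)
{
	int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	int mbps = 400; /* assumed peak throughput from ol_cfg */
	/*
	 * mbps * 1000 converts Mbps to bits per ms; dividing by
	 * 8 * frame bytes gives frames per ms; multiplying by the
	 * worst-case service latency gives the buffers needed.
	 */
	int size = mbps * 1000 / (8 * HTT_RX_AVG_FRM_BYTES) *
		   HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;
	else if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	/* 400 * 1000 / 8000 * 20 = 1000, rounded up to 1024 */
	printf("clamped %d -> ring size %d\n", size, pwr2(size));
	return 0;
}

With these inputs the raw estimate is 1000 entries, which a round-up-to-power-of-two helper turns into 1024.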
@@ -2928,6 +2949,7 @@ int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num)
 	if (qdf_atomic_read(&pdev->rx_ring.refill_debt)
 	    < RX_RING_REFILL_DEBT_MAX) {
 		qdf_atomic_add(num, &pdev->rx_ring.refill_debt);
+		pdev->rx_buff_debt_invoked++;
 		return filled; /* 0 */
 	}
/*
@@ -2938,9 +2960,17 @@ int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num)
 		 */
 		qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));
 	}
+	pdev->rx_buff_fill_n_invoked++;
 	filled = htt_rx_ring_fill_n(pdev, num);
 	qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
+
+	if (filled > num) {
+		/* we served ourselves and some other debt */
+		/* sub is safer than = 0 */
+		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
+	}
+
 	return filled;
 }
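Both htt_rx_ring_refill_retry and the replenish path above settle an over-serve the same way: htt_rx_ring_fill_n can post more buffers than the caller's own request, because its moretofill path also serves debt accumulated by other streams (now bounded by debt_served, so a stream that keeps adding debt can no longer trap the filler in that loop). A small model of why the surplus is subtracted rather than the counter zeroed, with hypothetical names:

/* Model of the settle step; names are illustrative, not the driver's. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refill_debt;

/*
 * 'filled' may exceed the caller's own request 'num' because the fill
 * loop also served debt posted by other contexts. Subtracting the
 * surplus, instead of storing 0, preserves any debt that other
 * streams added after the fill loop sampled the counter.
 */
static void settle_debt(int filled, int num)
{
	if (filled > num)
		atomic_fetch_sub(&refill_debt, filled - num);
}

int main(void)
{
	atomic_store(&refill_debt, 8);  /* debt owed by other streams */
	settle_debt(12, 4);             /* asked for 4, fill served 12 */
	printf("debt now %d\n", atomic_load(&refill_debt)); /* 0 */
	return 0;
}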


@@ -423,8 +423,11 @@ struct htt_pdev_t {
 	int rx_buff_posted_cum;
 	int rx_buff_recvd_cum;
 	int rx_buff_recvd_err;
+	int rx_buff_debt_invoked;
+	int rx_buff_fill_n_invoked;
 	int refill_retry_timer_starts;
 	int refill_retry_timer_calls;
+	int refill_retry_timer_doubles;
 #endif
 
 	/* callback function for packetdump */

@@ -6247,6 +6247,8 @@ void wlan_hdd_display_tx_rx_histogram(hdd_context_t *hdd_ctx)
 				  hdd_ctx->hdd_txrx_hist[i].
 				  next_tx_level));
 	}
 
 	return;
 }