qcacmn: Avoid using small buffer address

Memory below 0x2000 is reserved for target use, so no memory in
this region should be used by the host. On some third-party
platforms, however, we observe such memory being allocated for RX
buffers, which causes a HW/FW NOC error and then stalls RX. To
address this, re-allocate RX buffers whenever a small buffer
address appears.

Change-Id: Iad118e82f3fe10f92cbf5f7388dc0960542fc03c
CRs-Fixed: 2707190
Lihua Liu authored 2020-06-10 20:45:25 +08:00
committed by snandini
parent f16765d8bd
commit 74efc61848
5 changed files with 114 additions and 73 deletions
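
In outline, the fix works as follows: when a freshly mapped RX buffer lands below the reserved physical-address window, the buffer is unmapped and parked on a new soc-level invalid_buf_queue rather than freed (freeing it was observed to hand the same buffer straight back, stretching load time until WMI init timed out), and a replacement is allocated and mapped, up to MAX_RETRY times. Below is a minimal, self-contained C sketch of that loop; malloc() and a hand-rolled list stand in for the qdf_nbuf_* allocation, DMA-mapping, and queue APIs, and all names here (check_paddr, buf_queue_add, rx_buf) are illustrative, not the driver's:

#include <stdint.h>
#include <stdlib.h>

#define DP_PHY_ADDR_RESERVED 0x2000ULL	/* target-owned low window */
#define MAX_RETRY 50

struct rx_buf { struct rx_buf *next; char data[2048]; };
struct buf_queue { struct rx_buf *head; };

/* Park a rejected buffer; the queue is drained only at driver unload. */
static void buf_queue_add(struct buf_queue *q, struct rx_buf *b)
{
	b->next = q->head;
	q->head = b;
}

/*
 * Re-allocate until the "physical" address (here just the buffer's
 * virtual address, for illustration) clears the reserved window.
 * Rejected buffers are parked, not freed, so the allocator cannot
 * return the same low address on the very next call.
 */
static int check_paddr(struct rx_buf **buf, uint64_t *paddr,
		       struct buf_queue *invalid_q)
{
	unsigned int retry = 0;

	while (*paddr <= DP_PHY_ADDR_RESERVED && retry++ < MAX_RETRY) {
		buf_queue_add(invalid_q, *buf);	/* park the bad buffer */
		*buf = malloc(sizeof(**buf));
		if (!*buf)
			return -1;	/* allocation failed outright */
		*paddr = (uint64_t)(uintptr_t)(*buf)->data;
	}
	if (*paddr > DP_PHY_ADDR_RESERVED)
		return 0;		/* address is safe for RX DMA */
	buf_queue_add(invalid_q, *buf);	/* still bad after MAX_RETRY tries */
	*buf = NULL;
	return -1;
}

The parked buffers are deliberately kept until unload; see the dp_soc_deinit() hunk below, which drains the queue with qdf_nbuf_queue_free().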


@@ -4805,6 +4805,8 @@ static void dp_soc_deinit(void *txrx_soc)
qdf_nbuf_queue_free(&soc->htt_stats.msg);
+	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
qdf_spinlock_destroy(&soc->vdev_map_lock);
@@ -12529,6 +12531,7 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
INIT_RX_HW_STATS_LOCK(soc);
+	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
/* fill the tx/rx cpu ring map*/
dp_soc_set_txrx_ring_map(soc);


@@ -251,14 +251,10 @@ dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
rx_desc_pool->buf_size,
true);
-	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
+	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
&nbuf_frag_info_t->paddr,
rx_desc_pool);
if (ret == QDF_STATUS_E_FAILURE) {
-		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
-					     (nbuf_frag_info_t->virt_addr).nbuf,
-					     QDF_DMA_FROM_DEVICE,
-					     rx_desc_pool->buf_size);
DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
return QDF_STATUS_E_ADDRNOTAVAIL;
}
@@ -3042,15 +3038,10 @@ dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
nbuf_frag_info_t->paddr =
qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
-	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
+	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
&nbuf_frag_info_t->paddr,
rx_desc_pool);
if (ret == QDF_STATUS_E_FAILURE) {
-		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
-					     (nbuf_frag_info_t->virt_addr).nbuf,
-					     QDF_DMA_FROM_DEVICE,
-					     rx_desc_pool->buf_size);
-		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
dp_err("nbuf check x86 failed");
DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
return ret;


@@ -923,44 +923,70 @@ void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
qdf_nbuf_set_next((ptail), NULL); \
} while (0)
-/*for qcn9000 emulation the pcie is complete phy and no address restrictions*/
-#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
-static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
-				  qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
-{
-	return QDF_STATUS_SUCCESS;
-}
-#else
-#define MAX_RETRY 100
-static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
-				  qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
-{
-	uint32_t nbuf_retry = 0;
-	int32_t ret;
-	const uint32_t x86_phy_addr = 0x50000000;
+#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
+/*
+ * on some third-party platform, the memory below 0x2000
+ * is reserved for target use, so any memory allocated in this
+ * region should not be used by host
+ */
+#define MAX_RETRY 50
+#define DP_PHY_ADDR_RESERVED 0x2000
+#elif defined(BUILD_X86)
+/*
+ * in M2M emulation platforms (x86) the memory below 0x50000000
+ * is reserved for target use, so any memory allocated in this
+ * region should not be used by host
+ */
-	do {
-		if (qdf_likely(*paddr > x86_phy_addr))
-			return QDF_STATUS_SUCCESS;
-		else {
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-				  "phy addr %pK exceeded 0x50000000 trying again",
-				  paddr);
+#define MAX_RETRY 100
+#define DP_PHY_ADDR_RESERVED 0x50000000
+#endif
+#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
+/**
+ * dp_check_paddr() - check if current phy address is valid or not
+ * @dp_soc: core txrx main context
+ * @rx_netbuf: skb buffer
+ * @paddr: physical address
+ * @rx_desc_pool: struct of rx descriptor pool
+ *
+ * If the physical address of nbuf->data is below DP_PHY_ADDR_RESERVED,
+ * set the nbuf aside and try allocating a new one, retrying up to
+ * MAX_RETRY times.
+ *
+ * This is a temporary WAR till we fix it properly.
+ *
+ * Return: success or failure.
+ */
+static inline
+int dp_check_paddr(struct dp_soc *dp_soc,
+		   qdf_nbuf_t *rx_netbuf,
+		   qdf_dma_addr_t *paddr,
+		   struct rx_desc_pool *rx_desc_pool)
+{
+	uint32_t nbuf_retry = 0;
+	int32_t ret;
+
+	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
+		return QDF_STATUS_SUCCESS;
+
+	do {
+		dp_debug("invalid phy addr 0x%llx, trying again",
+			 (uint64_t)(*paddr));
nbuf_retry++;
if ((*rx_netbuf)) {
-			qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
-					      QDF_DMA_FROM_DEVICE);
+			/* Not freeing the buffer intentionally.
+			 * Observed that the same buffer keeps getting
+			 * re-allocated, resulting in longer load time and
+			 * WMI init timeout.
+			 * This buffer is anyway not useful, so skip it.
+			 * Add such buffers to the invalid list and free
+			 * them at driver unload.
+			 */
+			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
+						     *rx_netbuf,
+						     QDF_DMA_FROM_DEVICE,
+						     rx_desc_pool->buf_size);
+			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
+					   *rx_netbuf);
}
*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
@@ -972,8 +998,10 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
if (qdf_unlikely(!(*rx_netbuf)))
return QDF_STATUS_E_FAILURE;
-		ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
-					  QDF_DMA_FROM_DEVICE);
+		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
+						 *rx_netbuf,
+						 QDF_DMA_FROM_DEVICE,
+						 rx_desc_pool->buf_size);
if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
qdf_nbuf_free(*rx_netbuf);
@@ -982,17 +1010,34 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
}
*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
}
+		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
+			return QDF_STATUS_SUCCESS;
} while (nbuf_retry < MAX_RETRY);
if ((*rx_netbuf)) {
-		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
-				      QDF_DMA_FROM_DEVICE);
-		qdf_nbuf_free(*rx_netbuf);
+		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
+					     *rx_netbuf,
+					     QDF_DMA_FROM_DEVICE,
+					     rx_desc_pool->buf_size);
+		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
+				   *rx_netbuf);
}
return QDF_STATUS_E_FAILURE;
}
+#else
+static inline
+int dp_check_paddr(struct dp_soc *dp_soc,
+		   qdf_nbuf_t *rx_netbuf,
+		   qdf_dma_addr_t *paddr,
+		   struct rx_desc_pool *rx_desc_pool)
+{
+	return QDF_STATUS_SUCCESS;
+}
#endif
/**


@@ -1330,7 +1330,7 @@ static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
paddr = qdf_nbuf_get_frag_paddr(head, 0);
-	ret = check_x86_paddr(soc, &head, &paddr, rx_desc_pool);
+	ret = dp_check_paddr(soc, &head, &paddr, rx_desc_pool);
if (ret == QDF_STATUS_E_FAILURE) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,


@@ -1723,6 +1723,8 @@ struct dp_soc {
/* Dp runtime refcount */
qdf_atomic_t dp_runtime_refcount;
#endif
+	/* Invalid buffers that were allocated as RX buffers */
+	qdf_nbuf_queue_t invalid_buf_queue;
};
#ifdef IPA_OFFLOAD
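
Taken together, the hunks give invalid_buf_queue a driver-lifetime scope: dp_soc_init() initializes it, dp_check_paddr() parks any RX buffer whose physical address falls inside the reserved window, and dp_soc_deinit() finally releases the parked buffers through qdf_nbuf_queue_free() at unload.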