
The issue scenario is as follows:

1) Packets are received in the rx exception ring, and the processed
   rx_descs are put into the pdev rx_desc freelist.
2) In the buffer-replenish path, on an nbuf allocation or map error,
   the above pdev rx_desc freelist is moved to the soc rx_desc_pool
   freelist. The tail of the pdev rx_desc freelist is set to NULL,
   but not the head.
3) When packets are subsequently received in the rx exception ring,
   the new rx_descs are added to the pdev rx_desc freelist. Since the
   head of the list was not NULL prior to this, the tail of the new
   list has its next pointing to the previous stale rx_desc list,
   which has already been moved to the soc rx_desc_pool freelist.
4) In the replenish path, buffers are replenished up to the tail of
   the new list, but desc_list points to the aforementioned stale
   rx_desc list instead of NULL. So even though the replenish
   succeeds, the desc_list check that adds the list back to the soc
   rx_desc_pool freelist evaluates true. This updates the next
   pointer of the tail and, in effect, the nbuf pointer of the
   rx_desc pointed to by the tail.
5) When the rx_desc mentioned in the previous step is received, the
   nbuf sanity check fails, since the nbuf address points to another
   rx_desc.

The fix is to also set local_desc_list to NULL in
dp_rx_add_desc_list_to_free_list().

Change-Id: I984a4c122592547492b9d9625a71c0a90142b442
CRs-Fixed: 2704771
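A condensed sketch of the broken sequence (illustrative only; the
helper names below are the real ones used in this file, but the call
sites are simplified):

    /* 2) alloc/map error in replenish: the pdev freelist is spliced
     *    onto the soc rx_desc_pool freelist. Before this fix, only
     *    *tail was reset; desc_list kept pointing into the soc
     *    freelist.
     */
    dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
                                     pool_id, rx_desc_pool);

    /* 3) new exception-ring descs are chained in front of the stale
     *    head, so the new tail's next points into the soc freelist.
     *
     * 4) replenish succeeds, but desc_list != NULL, so the splice
     *    runs again and rewrites tail->next -- which aliases the
     *    nbuf field of a descriptor already on the soc freelist.
     */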
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS QDF_STATUS_SUCCESS
 *                    QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS QDF_STATUS_SUCCESS
 *                    QDF_STATUS_E_NOMEM
 *                    QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
				  desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

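/*
 * dp_rx_desc_find() - get the sw rx descriptor element at a given page
 *                     and offset within a multi page descriptor pool
 *
 * @page_id: index of the page holding the element
 * @offset: index of the element within the page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the rx descriptor element
 */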
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}

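/*
 * __dp_rx_desc_nbuf_free() - unmap and free the nbufs attached to the
 *                            in-use descriptors of the pool
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS QDF_STATUS_SUCCESS
 *                    QDF_STATUS_E_INVAL if the pool has no pages
 */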
static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page =
		rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_BIDIRECTIONAL);
			}
			qdf_nbuf_free(nbuf);
		}
	}

	return QDF_STATUS_SUCCESS;
}

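/*
 * dp_rx_desc_nbuf_and_pool_free() - free the nbufs held by the in-use
 *                                   descriptors and, on success, the
 *                                   pool memory itself
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */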
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

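/*
 * dp_rx_desc_nbuf_free() - free the nbufs held by the in-use
 *                          descriptors while keeping the pool memory
 *                          allocated
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */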
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

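/*
 * dp_rx_desc_pool_free() - free the multi page memory backing the rx
 *                          descriptor pool
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */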
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
				 &rx_desc_pool->desc_pages, 0, true);
}

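/*
 * dp_rx_desc_pool_deinit() - reset the pool freelist and size and
 *                            destroy the pool lock
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */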
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *                                  rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS QDF_STATUS_SUCCESS
 *                    QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *                           descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS QDF_STATUS_SUCCESS
 *                    QDF_STATUS_E_NOMEM
 *                    QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *                          convert the pool of memory into a list of
 *                          rx descriptors and create locks to access this
 *                          list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	int i;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
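		/* cookie: descriptor index in the low 18 bits,
		 * pool id in the bits above them
		 */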
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);

				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *                              the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *                                      the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
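	/*
	 * Reset the caller's head pointer as well as the tail. If
	 * *local_desc_list were left pointing at descriptors that have
	 * just been spliced onto the pool freelist, a subsequent
	 * replenish could splice the stale list again and overwrite
	 * the nbuf pointer of a descriptor already on the freelist
	 * (see CRs-Fixed: 2704771).
	 */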
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}