qcacmn: Add support for HW cookie conversion

Support HW cookie conversion for the BE platform: a primary page
table (PPT) in CMEM points to secondary page table (SPT) pages in
DDR that hold TX/RX SW descriptor virtual addresses, so REO/WBM
rings can return the descriptor VA directly, with a SW fallback
(dp_cc_desc_find) when HW conversion is not done.

Change-Id: I39058fbf256266557f5e734ba376db4db0731b24
CRs-Fixed: 2929533
Jinwei Chen
2021-02-22 03:22:07 -08:00
committed by Madan Koyyalamudi
parent 1bb3155d2c
commit 4083155141
30 changed files with 1618 additions and 124 deletions


@@ -21,6 +21,7 @@
#include "dp_be.h"
#include "dp_be_tx.h"
#include "dp_be_rx.h"
#include <hal_be_api.h>
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type)
{
@@ -38,6 +39,263 @@ qdf_size_t dp_get_context_size_be(enum dp_context_type context_type)
}
}
static QDF_STATUS dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc)
{
struct dp_soc *soc = &be_soc->soc;
struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
uint32_t max_tx_rx_desc_num, num_spt_pages, i = 0;
struct dp_spt_page_desc *page_desc_elem;
struct qdf_mem_dma_page_t *dma_page;
/* get CMEM for cookie conversion */
if (soc->cmem_size < DP_CC_PPT_MEM_SIZE) {
dp_err("cmem_size %llu bytes < 4K", soc->cmem_size);
return QDF_STATUS_E_RESOURCES;
}
cc_ctx->cmem_base = (uint32_t)(soc->cmem_base +
DP_CC_MEM_OFFSET_IN_CMEM);
/* estimate how many SPT DDR pages needed */
max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS;
num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
dp_info("num_spt_pages needed %d", num_spt_pages);
dp_desc_multi_pages_mem_alloc(soc, DP_HW_CC_SPT_PAGE_TYPE,
&cc_ctx->page_pool, qdf_page_size,
num_spt_pages, 0, false);
if (!cc_ctx->page_pool.dma_pages) {
dp_err("spt ddr pages allocation failed");
return QDF_STATUS_E_RESOURCES;
}
cc_ctx->page_desc_base = qdf_mem_malloc(
num_spt_pages * sizeof(struct dp_spt_page_desc));
if (!cc_ctx->page_desc_base) {
dp_err("spt page descs allocation failed");
goto fail_0;
}
/* initialize page descs */
page_desc_elem = cc_ctx->page_desc_base;
dma_page = cc_ctx->page_pool.dma_pages;
while (i < num_spt_pages) {
/* check if page address 4K aligned */
if (qdf_unlikely(dma_page[i].page_p_addr & 0xFFF)) {
dp_err("non-4k aligned pages addr %pK",
(void *)dma_page[i].page_p_addr);
goto fail_1;
}
page_desc_elem[i].page_v_addr =
dma_page[i].page_v_addr_start;
page_desc_elem[i].page_p_addr =
dma_page[i].page_p_addr;
i++;
}
cc_ctx->total_page_num = num_spt_pages;
qdf_spinlock_create(&cc_ctx->cc_lock);
return QDF_STATUS_SUCCESS;
fail_1:
qdf_mem_free(cc_ctx->page_desc_base);
fail_0:
dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
&cc_ctx->page_pool, 0, false);
return QDF_STATUS_E_FAILURE;
}
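/*
* A worked example of the sizing above, assuming (illustrative values
* only, not the actual target config) WLAN_CFG_NUM_TX_DESC_MAX = 32768,
* WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX = 16384 and 4 pools of each:
*   max_tx_rx_desc_num = 32768 * 4 + 16384 * 4 = 196608
*   num_spt_pages = 196608 / 512 = 384, which fits within the
*   DP_CC_PPT_MAX_ENTRIES (1024) cap
*/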
static QDF_STATUS dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc)
{
struct dp_soc *soc = &be_soc->soc;
struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
qdf_mem_free(cc_ctx->page_desc_base);
dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
&cc_ctx->page_pool, 0, false);
qdf_spinlock_destroy(&cc_ctx->cc_lock);
return QDF_STATUS_SUCCESS;
}
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
/**
* dp_cc_reg_cfg_init() - initialize and configure HW cookie
* conversion registers
* @soc: SOC handle
* @cc_ctx: cookie conversion context pointer
* @is_4k_align: page address is 4K aligned or not
*
* Return: None
*/
static void dp_cc_reg_cfg_init(struct dp_soc *soc,
struct dp_hw_cookie_conversion_t *cc_ctx,
bool is_4k_align)
{
struct hal_hw_cc_config cc_cfg = { 0 };
cc_cfg.lut_base_addr_31_0 = cc_ctx->cmem_base;
cc_cfg.cc_global_en = soc->wlan_cfg_ctx->hw_cc_enabled;
cc_cfg.page_4k_align = is_4k_align;
cc_cfg.cookie_offset_msb = DP_CC_DESC_ID_SPT_VA_OS_MSB;
cc_cfg.cookie_page_msb = is_4k_align ?
DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_MSB :
DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_MSB;
/* bit 36 should be set to 1 so HW knows this is a CMEM address */
cc_cfg.lut_base_addr_39_32 = 0x10;
cc_cfg.wbm2sw6_cc_en = 1;
cc_cfg.wbm2sw5_cc_en = 1;
cc_cfg.wbm2sw4_cc_en = 1;
cc_cfg.wbm2sw3_cc_en = 1;
cc_cfg.wbm2sw2_cc_en = 1;
cc_cfg.wbm2sw1_cc_en = 1;
cc_cfg.wbm2sw0_cc_en = 1;
cc_cfg.wbm2fw_cc_en = 0;
hal_cookie_conversion_reg_cfg_be(soc->hal_soc, &cc_cfg);
}
/**
* dp_hw_cc_cmem_write() - DP wrapper function for CMEM buffer writing
* @hal_soc_hdl: HAL SOC handle
* @offset: CMEM address
* @value: value to write
*
* Return: None.
*/
static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
uint32_t offset,
uint32_t value)
{
hal_cmem_write(hal_soc_hdl, offset, value);
}
#else
static inline void dp_cc_reg_cfg_init(struct dp_soc *soc,
struct dp_hw_cookie_conversion_t *cc_ctx,
bool is_4k_align) {}
static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
uint32_t offset,
uint32_t value)
{ }
#endif
static QDF_STATUS dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc)
{
struct dp_soc *soc = &be_soc->soc;
struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
uint32_t i = 0;
struct dp_spt_page_desc *page_desc_elem;
if (!cc_ctx->total_page_num) {
dp_err("total page num is 0");
return QDF_STATUS_E_INVAL;
}
page_desc_elem = cc_ctx->page_desc_base;
while (i < cc_ctx->total_page_num) {
/* write page PA to CMEM */
dp_hw_cc_cmem_write(soc->hal_soc,
(cc_ctx->cmem_base +
i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED),
(page_desc_elem[i].page_p_addr >>
DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED));
page_desc_elem[i].ppt_index = i;
page_desc_elem[i].avail_entry_index = 0;
/* link page desc */
if ((i + 1) != cc_ctx->total_page_num)
page_desc_elem[i].next = &page_desc_elem[i + 1];
else
page_desc_elem[i].next = NULL;
i++;
}
cc_ctx->page_desc_freelist = cc_ctx->page_desc_base;
cc_ctx->free_page_num = cc_ctx->total_page_num;
/* write WBM/REO cookie conversion CFG register */
dp_cc_reg_cfg_init(soc, cc_ctx, true);
return QDF_STATUS_SUCCESS;
}
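/*
* A worked example of the CMEM write above, assuming a 4K-aligned SPT
* page at the hypothetical PA 0x540321000:
*   value written = 0x540321000 >> DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED
*                 = 0x540321000 >> 12 = 0x540321
*   at offset cmem_base + i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED (4 bytes)
*/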
static QDF_STATUS dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc)
{
struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
cc_ctx->page_desc_freelist = NULL;
cc_ctx->free_page_num = 0;
return QDF_STATUS_SUCCESS;
}
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
struct dp_spt_page_desc **list_head,
struct dp_spt_page_desc **list_tail,
uint16_t desc_num)
{
uint16_t num_pages, count;
struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
num_pages = (desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES) +
(desc_num % DP_CC_SPT_PAGE_MAX_ENTRIES ? 1 : 0);
if (num_pages > cc_ctx->free_page_num) {
dp_err("fail: num_pages required %d > free_page_num %d",
num_pages,
cc_ctx->free_page_num);
return 0;
}
qdf_spin_lock_bh(&cc_ctx->cc_lock);
*list_head = *list_tail = cc_ctx->page_desc_freelist;
for (count = 0; count < num_pages; count++) {
if (qdf_unlikely(!cc_ctx->page_desc_freelist)) {
cc_ctx->page_desc_freelist = *list_head;
*list_head = *list_tail = NULL;
qdf_spin_unlock_bh(&cc_ctx->cc_lock);
return 0;
}
*list_tail = cc_ctx->page_desc_freelist;
cc_ctx->page_desc_freelist = cc_ctx->page_desc_freelist->next;
}
(*list_tail)->next = NULL;
cc_ctx->free_page_num -= count;
qdf_spin_unlock_bh(&cc_ctx->cc_lock);
return count;
}
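/*
* A minimal usage sketch of the alloc/free pair; the desc_num of 1024
* is an assumption for the example (1024 descs / 512 entries per page
* needs 2 SPT pages):
*
*	struct dp_spt_page_desc *head = NULL, *tail = NULL;
*	uint16_t got = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, 1024);
*
*	if (got)
*		dp_cc_spt_page_desc_free(be_soc, &head, &tail, got);
*/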
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
struct dp_spt_page_desc **list_head,
struct dp_spt_page_desc **list_tail,
uint16_t page_nums)
{
struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
struct dp_spt_page_desc *temp_list = NULL;
qdf_spin_lock_bh(&cc_ctx->cc_lock);
temp_list = cc_ctx->page_desc_freelist;
cc_ctx->page_desc_freelist = *list_head;
(*list_tail)->next = temp_list;
cc_ctx->free_page_num += page_nums;
*list_tail = NULL;
*list_head = NULL;
qdf_spin_unlock_bh(&cc_ctx->cc_lock);
}
static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc)
{
struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
@@ -46,6 +304,9 @@ static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc)
soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();
qdf_status = dp_tx_init_bank_profiles(be_soc);
/* cookie conversion */
qdf_status = dp_hw_cookie_conversion_attach(be_soc);
return qdf_status;
}
@@ -55,6 +316,27 @@ static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
dp_tx_deinit_bank_profiles(be_soc);
dp_hw_cookie_conversion_detach(be_soc);
return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_soc_init_be(struct dp_soc *soc)
{
struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
qdf_status = dp_hw_cookie_conversion_init(be_soc);
return qdf_status;
}
static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
{
struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
dp_hw_cookie_conversion_deinit(be_soc);
return QDF_STATUS_SUCCESS;
}
@@ -217,10 +499,21 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
arch_ops->dp_rx_process = dp_rx_process_be;
arch_ops->tx_comp_get_params_from_hal_desc =
dp_tx_comp_get_params_from_hal_desc_be;
arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_be;
arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_be;
arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_be;
arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_be;
arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
dp_wbm_get_rx_desc_from_hal_desc_be;
#endif
arch_ops->txrx_get_context_size = dp_get_context_size_be;
arch_ops->dp_rx_desc_cookie_2_va =
dp_rx_desc_cookie_2_va_be;
arch_ops->txrx_soc_attach = dp_soc_attach_be;
arch_ops->txrx_soc_detach = dp_soc_detach_be;
arch_ops->txrx_soc_init = dp_soc_init_be;
arch_ops->txrx_soc_deinit = dp_soc_deinit_be;
arch_ops->txrx_pdev_attach = dp_pdev_attach_be;
arch_ops->txrx_pdev_detach = dp_pdev_detach_be;
arch_ops->txrx_vdev_attach = dp_vdev_attach_be;


@@ -21,6 +21,112 @@
#include <dp_types.h>
#include <hal_be_tx.h>
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512
/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024
/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0
/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096
/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)
/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8
/* for 4k unaligned case */
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_MASK 0xFFE00
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_MSB 19
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
/* for 4k aligned case */
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_SHIFT 10
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_MASK 0xFFC00
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_LSB 10
#define DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_HIGH_32BIT_4K_ALIGNED_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_HIGH_32BIT_4K_ALIGNED_MASK 0x200
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4
/* 4K aligned case: number of bits HW appends to one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12
/**
* struct dp_spt_page_desc - secondary page table page descriptors
* @next: pointer to next linked SPT page Desc
* @page_v_addr: page virtual address
* @page_p_addr: page physical address
* @ppt_index: entry index in primary page table where this page's physical
* address is stored
* @avail_entry_index: index of the next available entry for storing a TX/RX Desc VA
*/
struct dp_spt_page_desc {
struct dp_spt_page_desc *next;
uint8_t *page_v_addr;
qdf_dma_addr_t page_p_addr;
uint16_t ppt_index;
uint16_t avail_entry_index;
};
/**
* struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
* @cmem_base: CMEM base address for primary page table setup
* @total_page_num: total number of DDR pages allocated
* @free_page_num: number of DDR pages available for TX/RX Desc ID initialization
* @page_desc_freelist: available page Desc list
* @page_desc_base: page Desc buffer base address.
* @page_pool: DDR pages pool
* @cc_lock: locks for page acquiring/free
*/
struct dp_hw_cookie_conversion_t {
uint32_t cmem_base;
uint32_t total_page_num;
uint32_t free_page_num;
struct dp_spt_page_desc *page_desc_freelist;
struct dp_spt_page_desc *page_desc_base;
struct qdf_mem_multi_page_t page_pool;
qdf_spinlock_t cc_lock;
};
/**
* struct dp_spt_page_desc_list - container of SPT page desc list info
* @spt_page_list_head: head of SPT page descriptor list
* @spt_page_list_tail: tail of SPT page descriptor list
* @num_spt_pages: number of SPT page descriptors allocated
*/
struct dp_spt_page_desc_list {
struct dp_spt_page_desc *spt_page_list_head;
struct dp_spt_page_desc *spt_page_list_tail;
uint16_t num_spt_pages;
};
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
{ ((uint64_t *)(_page_base_va))[_index] = (uint64_t)(_desc_va); }
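/*
* A minimal sketch of the macro above: publish a TX Desc VA into 64-bit
* slot 7 of an SPT page (names reused from this change for illustration):
*
*	DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, 7, tx_desc_elem);
*/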
/**
* struct dp_tx_bank_profile - DP wrapper for TCL banks
* @is_configured: flag indicating if this bank is configured
@@ -38,13 +144,18 @@ struct dp_tx_bank_profile {
* @soc: dp soc structure
* @num_bank_profiles: num TX bank profiles
* @bank_profiles: bank profiles for various TX banks
* @hw_cc_ctx: core context of HW cookie conversion
* @tx_spt_page_desc: spt page desc allocated for TX desc pool
* @rx_spt_page_desc: spt page desc allocated for RX desc pool
*/
struct dp_soc_be {
struct dp_soc soc;
uint8_t num_bank_profiles;
qdf_mutex_t tx_bank_lock;
struct dp_tx_bank_profile *bank_profiles;
struct dp_hw_cookie_conversion_t hw_cc_ctx;
struct dp_spt_page_desc_list tx_spt_page_desc[MAX_TXDESC_POOLS];
struct dp_spt_page_desc_list rx_spt_page_desc[MAX_RXDESC_POOLS];
};
/**
@@ -144,4 +255,117 @@ struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
return (struct dp_peer_be *)peer;
}
/**
* dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
* @be_soc: beryllium soc handler
* @list_head: pointer to page desc head
* @list_tail: pointer to page desc tail
* @desc_num: number of TX/RX Descs required for SPT pages
*
* Return: number of SPT page Desc allocated
*/
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
struct dp_spt_page_desc **list_head,
struct dp_spt_page_desc **list_tail,
uint16_t desc_num);
/**
* dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
* @be_soc: beryllium soc handler
* @list_head: pointer to page desc head
* @list_tail: pointer to page desc tail
* @page_nums: number of page desc freed back to pool
*/
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
struct dp_spt_page_desc **list_head,
struct dp_spt_page_desc **list_tail,
uint16_t page_nums);
/**
* dp_cc_desc_id_generate() - generate SW cookie ID according to
* whether the DDR page is 4K aligned or not
* @ppt_index: offset index in primary page table
* @spt_index: offset index in secondary DDR page
* @page_4k_align: DDR page address 4K aligned or not
*
* For a 4K-aligned DDR page, each PPT entry is 4 bytes while HW indexes
* entries in 8-byte units, so an extra bit (bit 9) is needed to select
* the high or low 32 bits of the 8-byte slot.
* For a 4K-unaligned DDR page, each PPT entry is 8 bytes, which matches
* the HW assumption directly.
*
* Return: cookie ID
*/
static inline uint32_t dp_cc_desc_id_generate(uint16_t ppt_index,
uint16_t spt_index,
bool page_4k_align)
{
uint32_t id = 0;
if (qdf_likely(page_4k_align))
id =
((ppt_index / 2) <<
DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_SHIFT) |
((ppt_index % 2) <<
DP_CC_DESC_ID_PPT_PAGE_HIGH_32BIT_4K_ALIGNED_SHIFT) |
spt_index;
else
id =
(ppt_index <<
DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_SHIFT) |
spt_index;
return id;
}
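/*
* A worked example of the 4K-aligned encoding above, with illustrative
* values ppt_index = 5, spt_index = 300:
*   id = ((5 / 2) << 10) | ((5 % 2) << 9) | 300
*      = 0x800 | 0x200 | 0x12C = 0xB2C
*/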
/**
* dp_cc_desc_find() - find TX/RX Desc virtual address by ID
* @soc: dp soc handle
* @desc_id: TX/RX Desc ID
* @page_4k_align: DDR page address 4K aligned or not
*
* Return: TX/RX Desc virtual address
*/
static inline void *dp_cc_desc_find(struct dp_soc *soc,
uint32_t desc_id,
bool page_4k_align)
{
struct dp_soc_be *be_soc;
struct dp_hw_cookie_conversion_t *cc_ctx;
uint16_t ppt_page_id, spt_va_id;
uint64_t *spt_page_va;
be_soc = dp_get_be_soc_from_dp_soc(soc);
cc_ctx = &be_soc->hw_cc_ctx;
if (qdf_likely(page_4k_align))
ppt_page_id =
(((desc_id &
DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_MASK) >>
DP_CC_DESC_ID_PPT_PAGE_OS_4K_ALIGNED_SHIFT) * 2) +
((desc_id &
DP_CC_DESC_ID_PPT_PAGE_HIGH_32BIT_4K_ALIGNED_MASK) >>
DP_CC_DESC_ID_PPT_PAGE_HIGH_32BIT_4K_ALIGNED_SHIFT);
else
ppt_page_id =
(desc_id &
DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_MASK) >>
DP_CC_DESC_ID_PPT_PAGE_OS_4K_UNALIGNED_SHIFT;
spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
DP_CC_DESC_ID_SPT_VA_OS_SHIFT;
/*
* The PPT index in CMEM follows the same order as the pages in the
* page desc array set up during initialization.
* Each entry in a DDR page is 64 bits, so:
* (1) on a 64-bit OS, the (uint64_t *) --> (void *) conversion is fine.
* (2) on a 32-bit OS, the TX/RX Desc VA is 32 bits; the (uint64_t *) -->
* (void *) conversion keeps the lower 32 bits, which is fine since the
* higher 32 bits are 0.
*/
spt_page_va =
(uint64_t *)cc_ctx->page_desc_base[ppt_page_id].page_v_addr;
return (void *)(*(spt_page_va + spt_va_id));
}
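/*
* A minimal sketch of decoding the example cookie above (0xB2C):
*   ppt_page_id = ((0xB2C & 0xFFC00) >> 10) * 2 + ((0xB2C & 0x200) >> 9)
*               = 2 * 2 + 1 = 5
*   spt_va_id   = 0xB2C & 0x1FF = 300
* so dp_cc_desc_find(soc, 0xB2C, true) reads entry 300 of SPT page 5.
*/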
#endif


@@ -168,7 +168,10 @@ more_data:
break;
}
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
rx_desc = (struct dp_rx_desc *)
hal_rx_get_reo_desc_va(ring_desc);
dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
ring_desc, rx_desc);
if (QDF_IS_STATUS_ERROR(status)) {
@@ -753,3 +756,227 @@ done:
return rx_bufs_used; /* Assume no scale factor for now */
}
#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
* dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
* @soc: Handle to DP Soc structure
* @rx_desc_pool: Rx descriptor pool handler
* @pool_id: Rx descriptor pool ID
*
* Return: QDF_STATUS_SUCCESS - succeeded, others - failed
*/
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
struct dp_soc_be *be_soc;
union dp_rx_desc_list_elem_t *rx_desc_elem;
struct dp_spt_page_desc *page_desc;
struct dp_spt_page_desc_list *page_desc_list;
be_soc = dp_get_be_soc_from_dp_soc(soc);
page_desc_list = &be_soc->rx_spt_page_desc[pool_id];
/* allocate SPT pages from page desc pool */
page_desc_list->num_spt_pages =
dp_cc_spt_page_desc_alloc(be_soc,
&page_desc_list->spt_page_list_head,
&page_desc_list->spt_page_list_tail,
rx_desc_pool->pool_size);
if (!page_desc_list->num_spt_pages) {
dp_err("fail to allocate cookie conversion spt pages");
return QDF_STATUS_E_FAILURE;
}
/* put each RX Desc VA to SPT pages and get corresponding ID */
page_desc = page_desc_list->spt_page_list_head;
rx_desc_elem = rx_desc_pool->freelist;
while (rx_desc_elem) {
DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
page_desc->avail_entry_index,
&rx_desc_elem->rx_desc);
rx_desc_elem->rx_desc.cookie =
dp_cc_desc_id_generate(page_desc->ppt_index,
page_desc->avail_entry_index,
true);
rx_desc_elem->rx_desc.pool_id = pool_id;
rx_desc_elem->rx_desc.in_use = 0;
rx_desc_elem = rx_desc_elem->next;
page_desc->avail_entry_index++;
if (page_desc->avail_entry_index >=
DP_CC_SPT_PAGE_MAX_ENTRIES)
page_desc = page_desc->next;
}
return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
struct dp_soc_be *be_soc;
struct dp_spt_page_desc *page_desc;
struct dp_spt_page_desc_list *page_desc_list;
int i;
be_soc = dp_get_be_soc_from_dp_soc(soc);
page_desc_list = &be_soc->rx_spt_page_desc[pool_id];
/* allocate SPT pages from page desc pool */
page_desc_list->num_spt_pages =
dp_cc_spt_page_desc_alloc(
be_soc,
&page_desc_list->spt_page_list_head,
&page_desc_list->spt_page_list_tail,
rx_desc_pool->pool_size);
if (!page_desc_list->num_spt_pages) {
dp_err("fail to allocate cookie conversion spt pages");
return QDF_STATUS_E_FAILURE;
}
/* put each RX Desc VA to SPT pages and get corresponding ID */
page_desc = page_desc_list->spt_page_list_head;
for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
if (i == rx_desc_pool->pool_size - 1)
rx_desc_pool->array[i].next = NULL;
else
rx_desc_pool->array[i].next =
&rx_desc_pool->array[i + 1];
DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
page_desc->avail_entry_index,
&rx_desc_pool->array[i].rx_desc);
rx_desc_pool->array[i].rx_desc.cookie =
dp_cc_desc_id_generate(page_desc->ppt_index,
page_desc->avail_entry_index,
true);
rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
rx_desc_pool->array[i].rx_desc.in_use = 0;
page_desc->avail_entry_index++;
if (page_desc->avail_entry_index >=
DP_CC_SPT_PAGE_MAX_ENTRIES)
page_desc = page_desc->next;
}
return QDF_STATUS_SUCCESS;
}
#endif
static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
struct dp_soc_be *be_soc;
struct dp_spt_page_desc *page_desc;
struct dp_spt_page_desc_list *page_desc_list;
be_soc = dp_get_be_soc_from_dp_soc(soc);
page_desc_list = &be_soc->rx_spt_page_desc[pool_id];
/* cleanup for each page */
page_desc = page_desc_list->spt_page_list_head;
while (page_desc) {
page_desc->avail_entry_index = 0;
qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
page_desc = page_desc->next;
}
/* free pages desc back to pool */
dp_cc_spt_page_desc_free(be_soc,
&page_desc_list->spt_page_list_head,
&page_desc_list->spt_page_list_tail,
page_desc_list->num_spt_pages);
page_desc_list->num_spt_pages = 0;
}
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
QDF_STATUS status = QDF_STATUS_SUCCESS;
/* Only the regular RX buffer desc pool uses HW cookie conversion */
if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
dp_info("rx_desc_buf pool init");
status = dp_rx_desc_pool_init_be_cc(soc,
rx_desc_pool,
pool_id);
} else {
dp_info("non_rx_desc_buf_pool init");
status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}
return status;
}
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
void *ring_desc,
struct dp_rx_desc **r_rx_desc)
{
if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
/* HW cookie conversion done */
*r_rx_desc = (struct dp_rx_desc *)
hal_rx_wbm_get_desc_va(ring_desc);
} else {
/* SW does cookie conversion */
uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
*r_rx_desc = (struct dp_rx_desc *)
dp_cc_desc_find(soc, cookie, true);
}
return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
void *ring_desc,
struct dp_rx_desc **r_rx_desc)
{
*r_rx_desc = (struct dp_rx_desc *)
hal_rx_wbm_get_desc_va(ring_desc);
return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
void *ring_desc,
struct dp_rx_desc **r_rx_desc)
{
/* SW does cookie conversion */
uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
*r_rx_desc = (struct dp_rx_desc *)
dp_cc_desc_find(soc, cookie, true);
return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
uint32_t cookie)
{
return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie, true);
}


@@ -25,4 +25,83 @@
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
uint32_t quota);
/**
* dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @rx_desc_pool: Rx descriptor pool handler
* @pool_id: Rx descriptor pool ID
*
* Return: QDF_STATUS_SUCCESS - succeeded, others - failed
*/
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
/**
* dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @rx_desc_pool: Rx descriptor pool handler
* @pool_id: Rx descriptor pool ID
*
* Return: None
*/
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
/**
* dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
* address from WBM ring Desc
* @soc: Handle to DP Soc structure
* @ring_desc: ring descriptor structure pointer
* @r_rx_desc: pointer to a pointer of Rx Desc
*
* Return: QDF_STATUS_SUCCESS - succeeded, others - failed
*/
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
void *ring_desc,
struct dp_rx_desc **r_rx_desc);
/**
* dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
* @soc: Handle to DP Soc structure
* @cookie: cookie used to lookup virtual address
*
* Return: Rx descriptor virtual address
*/
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
uint32_t cookie);
#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
* dp_rx_desc_sw_cc_check() - check if the RX desc VA was obtained
* correctly; if not, do SW cookie conversion
* @soc: Handle to DP Soc structure
* @rx_buf_cookie: RX desc cookie ID
* @r_rx_desc: double pointer for RX desc
*
* Return: None
*/
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
uint32_t rx_buf_cookie,
struct dp_rx_desc **r_rx_desc)
{
if (qdf_unlikely(!(*r_rx_desc))) {
*r_rx_desc = (struct dp_rx_desc *)
dp_cc_desc_find(soc,
rx_buf_cookie,
true);
}
}
#else
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
uint32_t rx_buf_cookie,
struct dp_rx_desc **r_rx_desc)
{
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION && DP_HW_COOKIE_CONVERT_EXCEPTION */
#endif


@@ -24,24 +24,49 @@
#include "hal_tx.h"
#include <hal_be_api.h>
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
void *tx_comp_hal_desc,
struct dp_tx_desc_s **r_tx_desc)
{
uint8_t pool_id;
uint32_t tx_desc_id;
if (qdf_likely(
hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc))) {
/* HW cookie conversion done */
*r_tx_desc = (struct dp_tx_desc_s *)
hal_tx_comp_get_desc_va(tx_comp_hal_desc);
} else {
/* SW does cookie conversion to VA */
tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
DP_TX_DESC_ID_POOL_OS;
/* Find Tx descriptor */
*r_tx_desc = dp_tx_desc_find(soc, pool_id,
(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
DP_TX_DESC_ID_PAGE_OS,
(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
DP_TX_DESC_ID_OFFSET_OS);
*r_tx_desc =
(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id, true);
}
}
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
void *tx_comp_hal_desc,
struct dp_tx_desc_s **r_tx_desc)
{
*r_tx_desc = (struct dp_tx_desc_s *)
hal_tx_comp_get_desc_va(tx_comp_hal_desc);
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
void *tx_comp_hal_desc,
struct dp_tx_desc_s **r_tx_desc)
{
uint32_t tx_desc_id;
/* SW does cookie conversion to VA */
tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
*r_tx_desc =
(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id, true);
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
QDF_STATUS
dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
@@ -323,3 +348,84 @@ void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
dp_tx_put_bank_profile(be_soc, be_vdev);
be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
}
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
uint16_t pool_desc_num,
uint8_t pool_id)
{
struct dp_tx_desc_pool_s *tx_desc_pool;
struct dp_soc_be *be_soc;
struct dp_spt_page_desc *page_desc;
struct dp_spt_page_desc_list *page_desc_list;
struct dp_tx_desc_s *tx_desc_elem;
if (!pool_desc_num) {
dp_err("desc_num 0 !!");
return QDF_STATUS_E_FAILURE;
}
be_soc = dp_get_be_soc_from_dp_soc(soc);
tx_desc_pool = &soc->tx_desc[pool_id];
page_desc_list = &be_soc->tx_spt_page_desc[pool_id];
/* allocate SPT pages from page desc pool */
page_desc_list->num_spt_pages =
dp_cc_spt_page_desc_alloc(be_soc,
&page_desc_list->spt_page_list_head,
&page_desc_list->spt_page_list_tail,
pool_desc_num);
if (!page_desc_list->num_spt_pages) {
dp_err("fail to allocate cookie conversion spt pages");
return QDF_STATUS_E_FAILURE;
}
/* put each TX Desc VA to SPT pages and get corresponding ID */
page_desc = page_desc_list->spt_page_list_head;
tx_desc_elem = tx_desc_pool->freelist;
while (tx_desc_elem) {
DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
page_desc->avail_entry_index,
tx_desc_elem);
tx_desc_elem->id =
dp_cc_desc_id_generate(page_desc->ppt_index,
page_desc->avail_entry_index,
true);
tx_desc_elem->pool_id = pool_id;
tx_desc_elem = tx_desc_elem->next;
page_desc->avail_entry_index++;
if (page_desc->avail_entry_index >=
DP_CC_SPT_PAGE_MAX_ENTRIES)
page_desc = page_desc->next;
}
return QDF_STATUS_SUCCESS;
}
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
struct dp_tx_desc_pool_s *tx_desc_pool,
uint8_t pool_id)
{
struct dp_soc_be *be_soc;
struct dp_spt_page_desc *page_desc;
struct dp_spt_page_desc_list *page_desc_list;
be_soc = dp_get_be_soc_from_dp_soc(soc);
page_desc_list = &be_soc->tx_spt_page_desc[pool_id];
/* cleanup for each page */
page_desc = page_desc_list->spt_page_list_head;
while (page_desc) {
page_desc->avail_entry_index = 0;
qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
page_desc = page_desc->next;
}
/* free pages desc back to pool */
dp_cc_spt_page_desc_free(be_soc,
&page_desc_list->spt_page_list_head,
&page_desc_list->spt_page_list_tail,
page_desc_list->num_spt_pages);
page_desc_list->num_spt_pages = 0;
}


@@ -109,4 +109,27 @@ void dp_tx_put_bank_profile(struct dp_soc_be *soc, struct dp_vdev_be *be_vdev);
*/
void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
struct dp_vdev_be *be_vdev);
/**
* dp_tx_desc_pool_init_be() - Initialize Tx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @pool_desc_num: pool descriptor number
* @pool_id: pool ID to allocate
*
* Return: QDF_STATUS_SUCCESS - success, others - failure
*/
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
uint16_t pool_desc_num,
uint8_t pool_id);
/**
* dp_tx_desc_pool_deinit_be() - De-initialize Tx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @tx_desc_pool: Tx descriptor pool handler
* @pool_id: pool ID to deinit
*
* Return: None
*/
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
struct dp_tx_desc_pool_s *tx_desc_pool,
uint8_t pool_id);
#endif


@@ -5280,6 +5280,8 @@ static void dp_soc_deinit(void *txrx_soc)
qdf_atomic_set(&soc->cmn_init_done, 0);
soc->arch_ops.txrx_soc_deinit(soc);
/* free peer tables & AST tables allocated during peer_map_attach */
if (soc->peer_map_attach_success) {
dp_peer_find_detach(soc);
@@ -12745,12 +12747,16 @@ dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
dp_err("DP SOC memory allocation failed");
goto fail0;
}
dp_info("soc memory allocated %pk", soc);
soc->hif_handle = hif_handle;
soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
if (!soc->hal_soc)
goto fail1;
hif_get_cmem_info(soc->hif_handle,
&soc->cmem_base,
&soc->cmem_size);
int_ctx = 0;
soc->device_id = device_id;
soc->cdp_soc.ops = &dp_txrx_ops;
@@ -12852,21 +12858,26 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
WLAN_MD_DP_SOC, "dp_soc");
if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
dp_err("unable to do target specific init");
goto fail0;
}
htt_soc = htt_soc_attach(soc, htc_handle);
if (!htt_soc)
goto fail0;
goto fail1;
soc->htt_handle = htt_soc;
if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
goto fail1;
goto fail2;
htt_set_htc_handle(htt_soc, htc_handle);
soc->hif_handle = hif_handle;
soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
if (!soc->hal_soc)
goto fail2;
goto fail3;
dp_soc_cfg_init(soc);
@@ -12886,7 +12897,7 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
if (num_dp_msi < 0) {
dp_init_err("%pK: dp_interrupt assignment failed", soc);
goto fail3;
goto fail4;
}
wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
soc->intr_mode, is_monitor_mode);
@@ -12894,20 +12905,20 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
/* initialize WBM_IDLE_LINK ring */
if (dp_hw_link_desc_ring_init(soc)) {
dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
goto fail3;
goto fail4;
}
dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
if (dp_soc_srng_init(soc)) {
dp_init_err("%pK: dp_soc_srng_init failed", soc);
goto fail4;
goto fail5;
}
if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
htt_get_htc_handle(htt_soc),
soc->hal_soc, soc->osdev) == NULL)
goto fail5;
goto fail6;
/* Initialize descriptors in TCL Rings */
for (i = 0; i < soc->num_tcl_data_rings; i++) {
@@ -12917,7 +12928,7 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
if (dp_soc_tx_desc_sw_pools_init(soc)) {
dp_init_err("%pK: dp_tx_soc_attach failed", soc);
goto fail6;
goto fail7;
}
wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
@@ -13028,18 +13039,20 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
qdf_skb_total_mem_stats_read());
return soc;
fail6:
fail7:
htt_soc_htc_dealloc(soc->htt_handle);
fail5:
fail6:
dp_soc_srng_deinit(soc);
fail4:
fail5:
dp_hw_link_desc_ring_deinit(soc);
fail3:
fail4:
dp_hw_link_desc_ring_free(soc);
fail2:
fail3:
htt_htc_pkt_pool_free(htt_soc);
fail1:
fail2:
htt_soc_detach(htt_soc);
fail1:
soc->arch_ops.txrx_soc_deinit(soc);
fail0:
return NULL;
}


@@ -2551,12 +2551,12 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
/**
* If NSS is enabled, rx_desc_pool is already filled.
* Hence, just disable desc_pool frag flag.
*/
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
@@ -2564,7 +2564,6 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
return QDF_STATUS_SUCCESS;
}
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
return QDF_STATUS_E_NOMEM;
@@ -2602,7 +2601,7 @@ void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
dp_rx_desc_pool_deinit(soc, rx_desc_pool);
dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}
/*


@@ -540,7 +540,7 @@ void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
if (qdf_unlikely(index >= rx_desc_pool->pool_size))
return NULL;
return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
return &rx_desc_pool->array[index].rx_desc;
}
/**
@@ -653,7 +653,8 @@ void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool);
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
@@ -1990,4 +1991,21 @@ dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
return false;
}
#endif
/**
* dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
* @soc: SOC handle
* @rx_desc_pool: pointer to RX descriptor pool
* @pool_id: pool ID
*
* Return: QDF_STATUS_SUCCESS - succeeded, others - failed
*/
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
#endif /* _DP_RX_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -89,33 +89,15 @@ free_rx_desc_pool:
return QDF_STATUS_E_FAULT;
}
/*
* dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
* convert the pool of memory into a list of
* rx descriptors and create locks to access this
* list of rx descriptors.
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @pool_size: size of the rx descriptor pool
* @rx_desc_pool: rx descriptor pool pointer
*/
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
uint32_t id, page_id, offset, num_desc_per_page;
uint32_t count = 0;
union dp_rx_desc_list_elem_t *rx_desc_elem;
/* Initialize the lock */
qdf_spinlock_create(&rx_desc_pool->lock);
qdf_spin_lock_bh(&rx_desc_pool->lock);
rx_desc_pool->pool_size = pool_size;
num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
*rx_desc_pool->desc_pages.cacheable_pages;
rx_desc_elem = rx_desc_pool->freelist;
while (rx_desc_elem) {
@@ -138,6 +120,39 @@ void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
rx_desc_elem = rx_desc_elem->next;
count++;
}
return QDF_STATUS_SUCCESS;
}
/*
* dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
* convert the pool of memory into a list of
* rx descriptors and create locks to access this
* list of rx descriptors.
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @pool_size: size of the rx descriptor pool
* @rx_desc_pool: rx descriptor pool pointer
*/
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
QDF_STATUS status;
/* Initialize the lock */
qdf_spinlock_create(&rx_desc_pool->lock);
qdf_spin_lock_bh(&rx_desc_pool->lock);
rx_desc_pool->pool_size = pool_size;
rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
*rx_desc_pool->desc_pages.cacheable_pages;
status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
pool_id);
if (!QDF_IS_STATUS_SUCCESS(status))
dp_err("RX desc pool initialization failed");
qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
@@ -249,7 +264,8 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
}
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool)
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
qdf_spin_lock_bh(&rx_desc_pool->lock);
@@ -259,6 +275,8 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc,
/* Deinitialize rx mon desc frag flag */
rx_desc_pool->rx_mon_dest_frag_enable = false;
soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);
qdf_spin_unlock_bh(&rx_desc_pool->lock);
qdf_spinlock_destroy(&rx_desc_pool->lock);
}
@@ -308,6 +326,25 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
int i;
for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
if (i == rx_desc_pool->pool_size - 1)
rx_desc_pool->array[i].next = NULL;
else
rx_desc_pool->array[i].next =
&rx_desc_pool->array[i + 1];
rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
rx_desc_pool->array[i].rx_desc.in_use = 0;
}
return QDF_STATUS_SUCCESS;
}
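/*
* A worked example of the legacy (non-CC) cookie above, with
* illustrative values pool_id = 2, i = 100:
*   cookie = 100 | (2 << 18) = 0x64 | 0x80000 = 0x80064
*/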
/*
* dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
* convert the pool of memory into a list of
@@ -322,7 +359,8 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
int i;
QDF_STATUS status;
/* Initialize the lock */
qdf_spinlock_create(&rx_desc_pool->lock);
@@ -332,16 +370,11 @@ void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
/* link SW rx descs into a freelist */
rx_desc_pool->freelist = &rx_desc_pool->array[0];
qdf_mem_zero(rx_desc_pool->freelist, rx_desc_pool->pool_size);
for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
if (i == rx_desc_pool->pool_size - 1)
rx_desc_pool->array[i].next = NULL;
else
rx_desc_pool->array[i].next =
&rx_desc_pool->array[i + 1];
rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
rx_desc_pool->array[i].rx_desc.in_use = 0;
}
status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
pool_id);
if (!QDF_IS_STATUS_SUCCESS(status))
dp_err("RX desc pool initialization failed");
qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
@@ -446,7 +479,8 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
}
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool)
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
qdf_spin_lock_bh(&rx_desc_pool->lock);
@@ -456,12 +490,20 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc,
/* Deinitialize rx mon desc frag flag */
rx_desc_pool->rx_mon_dest_frag_enable = false;
soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);
qdf_spin_unlock_bh(&rx_desc_pool->lock);
qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
}
/*
* dp_rx_get_free_desc_list() - provide a list of descriptors from
* the free rx desc pool.


@@ -321,8 +321,8 @@ more_msdu_link_desc:
&mpdu_desc_info->msdu_count);
for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
msdu_list.sw_cookie[i]);
rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
soc, msdu_list.sw_cookie[i]);
qdf_assert_always(rx_desc);
@@ -560,7 +560,7 @@ more_msdu_link_desc:
hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
&num_msdus);
for (i = 0; i < num_msdus; i++) {
rx_desc = dp_rx_cookie_2_va_rxdma_buf(
rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
soc,
msdu_list.sw_cookie[i]);
@@ -1937,7 +1937,8 @@ dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
goto next_entry;
}
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
soc,
msdu_list.sw_cookie[0]);
qdf_assert_always(rx_desc);
@@ -2167,7 +2168,6 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
uint8_t msdu_continuation = 0;
bool process_sg_buf = false;
uint32_t wbm_err_src;
struct hal_buf_info buf_info = {0};
/* Debug -- Remove later */
qdf_assert(soc && hal_ring_hdl);
@@ -2206,30 +2206,18 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
(wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
/*
* Check if the buffer is to be processed on this processor
*/
/* only cookie and rbm will be valid in buf_info */
hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
&buf_info);
if (qdf_unlikely(buf_info.rbm !=
HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
/* TODO */
/* Call appropriate handler */
DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
dp_rx_err_err("%pK: Invalid RBM %d", soc,
buf_info.rbm);
if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
ring_desc,
&rx_desc)) {
dp_rx_err_err("get rx desc from hal_desc failed");
continue;
}
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);
qdf_assert_always(rx_desc);
if (!dp_rx_desc_check_magic(rx_desc)) {
dp_rx_err_err("%pk: Invalid rx_desc cookie=%d",
soc, buf_info.sw_cookie);
dp_rx_err_err("%pk: Invalid rx_desc %pk",
soc, rx_desc);
continue;
}
@@ -2677,7 +2665,9 @@ dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
else {
for (i = 0; i < num_msdus; i++) {
struct dp_rx_desc *rx_desc =
dp_rx_cookie_2_va_rxdma_buf(soc,
soc->arch_ops.
dp_rx_desc_cookie_2_va(
soc,
msdu_list.sw_cookie[i]);
qdf_assert_always(rx_desc);
msdu = rx_desc->nbuf;
@@ -2860,7 +2850,7 @@ dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
for (i = 0; i < num_msdus; i++) {
struct dp_rx_desc *rx_desc =
dp_rx_cookie_2_va_rxdma_buf(
soc->arch_ops.dp_rx_desc_cookie_2_va(
soc,
msdu_list.sw_cookie[i]);
qdf_assert_always(rx_desc);
@@ -2954,7 +2944,9 @@ dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);
rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
soc,
buf_info.sw_cookie);
if (rx_desc && rx_desc->nbuf) {
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];


@@ -1894,7 +1894,7 @@ dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id);
dp_rx_desc_pool_deinit(soc, rx_desc_pool);
dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
/* Detach full monitor mode resources */
dp_full_mon_detach(pdev);


@@ -2294,7 +2294,7 @@ dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id) {
dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);
dp_rx_desc_pool_deinit(soc, rx_desc_pool);
dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
}
void


@@ -140,15 +140,12 @@ void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
{
uint32_t id, count, page_id, offset, pool_id_32;
struct dp_tx_desc_pool_s *tx_desc_pool;
struct dp_tx_desc_s *tx_desc_elem;
uint16_t num_desc_per_page;
uint32_t desc_size;
desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
tx_desc_pool = &soc->tx_desc[pool_id];
if (qdf_mem_multi_page_link(soc->osdev,
&tx_desc_pool->desc_pages,
desc_size, num_elem, true)) {
@@ -159,23 +156,13 @@ QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
tx_desc_pool->freelist = (struct dp_tx_desc_s *)
*tx_desc_pool->desc_pages.cacheable_pages;
/* Set unique IDs for each Tx descriptor */
tx_desc_elem = tx_desc_pool->freelist;
count = 0;
pool_id_32 = (uint32_t)pool_id;
num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
while (tx_desc_elem) {
page_id = count / num_desc_per_page;
offset = count % num_desc_per_page;
id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);
tx_desc_elem->id = id;
tx_desc_elem->pool_id = pool_id;
tx_desc_elem = tx_desc_elem->next;
count++;
if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
soc, num_elem, pool_id)) {
dp_err("initialization per target failed");
return QDF_STATUS_E_FAULT;
}
tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
@@ -193,7 +180,8 @@ void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
struct dp_tx_desc_pool_s *tx_desc_pool;
tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
tx_desc_pool = &soc->tx_desc[pool_id];
soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -791,7 +791,7 @@ static inline void dp_tx_desc_update_fast_comp_flag(struct dp_soc *soc,
static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
uint8_t pool_id, uint16_t page_id, uint16_t offset)
{
struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
struct dp_tx_desc_pool_s *tx_desc_pool = &soc->tx_desc[pool_id];
return tx_desc_pool->desc_pages.cacheable_pages[page_id] +
tx_desc_pool->elem_size * offset;


@@ -423,6 +423,9 @@ enum dp_desc_type {
DP_RX_DESC_BUF_TYPE,
DP_RX_DESC_STATUS_TYPE,
DP_HW_LINK_DESC_TYPE,
#ifdef CONFIG_BERYLLIUM
DP_HW_CC_SPT_PAGE_TYPE,
#endif
};
/**
@@ -1532,6 +1535,8 @@ struct dp_arch_ops {
/* INIT/DEINIT Arch Ops */
QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc);
QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
QDF_STATUS (*txrx_soc_init)(struct dp_soc *soc);
QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev);
QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
@@ -1553,6 +1558,30 @@ struct dp_arch_ops {
uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
hal_ring_handle_t hal_ring_hdl,
uint8_t reo_ring_num, uint32_t quota);
QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
uint16_t pool_desc_num,
uint8_t pool_id);
void (*dp_tx_desc_pool_deinit)(
struct dp_soc *soc,
struct dp_tx_desc_pool_s *tx_desc_pool,
uint8_t pool_id);
QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
struct dp_soc *soc,
void *ring_desc,
struct dp_rx_desc **r_rx_desc);
struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
uint32_t cookie);
/* Control Arch Ops */
QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
struct dp_vdev *vdev,
@@ -2034,6 +2063,11 @@ struct dp_soc {
/* link desc ID start per device type */
uint32_t link_desc_id_start;
/* CMEM buffer target reserved for host usage */
uint64_t cmem_base;
/* CMEM size in bytes */
uint64_t cmem_size;
};
#ifdef IPA_OFFLOAD


@@ -51,6 +51,16 @@ static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev)
{
return QDF_STATUS_SUCCESS;
@@ -252,13 +262,28 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
arch_ops->dp_rx_process = dp_rx_process_li;
arch_ops->tx_comp_get_params_from_hal_desc =
dp_tx_comp_get_params_from_hal_desc_li;
arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
dp_wbm_get_rx_desc_from_hal_desc_li;
arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
#else
arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
arch_ops->txrx_get_context_size = dp_get_context_size_li;
arch_ops->txrx_soc_attach = dp_soc_attach_li;
arch_ops->txrx_soc_detach = dp_soc_detach_li;
arch_ops->txrx_soc_init = dp_soc_init_li;
arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
arch_ops->dp_rx_desc_cookie_2_va =
dp_rx_desc_cookie_2_va_li;
arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
}


@@ -58,7 +58,6 @@ struct dp_peer_li {
*
* Return: value in bytes for LI specific soc structure
*/
qdf_size_t dp_get_soc_context_size_li(void);
/**


@@ -787,3 +787,43 @@ done:
return rx_bufs_used; /* Assume no scale factor for now */
}
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
return dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
}
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id)
{
}
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
struct dp_soc *soc,
void *ring_desc,
struct dp_rx_desc **r_rx_desc)
{
struct hal_buf_info buf_info = {0};
hal_soc_handle_t hal_soc = soc->hal_soc;
/* only cookie and rbm will be valid in buf_info */
hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
&buf_info);
if (qdf_unlikely(buf_info.rbm !=
HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id))) {
/* TODO */
/* Call appropriate handler */
DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
dp_rx_err("%pK: Invalid RBM %d", soc, buf_info.rbm);
return QDF_STATUS_E_INVAL;
}
*r_rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, buf_info.sw_cookie);
return QDF_STATUS_SUCCESS;
}


@@ -20,9 +20,62 @@
#define _DP_LI_RX_H_
#include <dp_types.h>
#include <dp_rx.h>
#include "dp_li.h"
uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
uint32_t quota);
/**
* dp_rx_desc_pool_init_li() - Initialize Rx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @rx_desc_pool: Rx descriptor pool handler
* @pool_id: Rx descriptor pool ID
*
* Return: QDF_STATUS_SUCCESS - succeeded, others - failed
*/
QDF_STATUS dp_rx_desc_pool_init_li(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
/**
* dp_rx_desc_pool_deinit_li() - De-initialize Rx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @rx_desc_pool: Rx descriptor pool handler
* @pool_id: Rx descriptor pool ID
*
* Return: None
*/
void dp_rx_desc_pool_deinit_li(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool,
uint32_t pool_id);
/**
* dp_wbm_get_rx_desc_from_hal_desc_li() - Get corresponding Rx Desc
* address from WBM ring Desc
* @soc: Handle to DP Soc structure
* @ring_desc: ring descriptor structure pointer
* @r_rx_desc: pointer to a pointer of Rx Desc
*
* Return: QDF_STATUS_SUCCESS - succeeded, others - failed
*/
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_li(
struct dp_soc *soc,
void *ring_desc,
struct dp_rx_desc **r_rx_desc);
/**
* dp_rx_desc_cookie_2_va_li() - Convert RX Desc cookie ID to VA
* @soc: Handle to DP Soc structure
* @cookie: cookie used to lookup virtual address
*
* Return: Rx descriptor virtual address
*/
static inline
struct dp_rx_desc *dp_rx_desc_cookie_2_va_li(struct dp_soc *soc,
uint32_t cookie)
{
return dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
}
#endif


@@ -192,3 +192,38 @@ ring_access_fail:
return status;
}
QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
uint16_t pool_desc_num,
uint8_t pool_id)
{
uint32_t id, count, page_id, offset, pool_id_32;
struct dp_tx_desc_s *tx_desc_elem;
struct dp_tx_desc_pool_s *tx_desc_pool;
uint16_t num_desc_per_page;
tx_desc_pool = &soc->tx_desc[pool_id];
tx_desc_elem = tx_desc_pool->freelist;
count = 0;
pool_id_32 = (uint32_t)pool_id;
num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
while (tx_desc_elem) {
page_id = count / num_desc_per_page;
offset = count % num_desc_per_page;
id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);
tx_desc_elem->id = id;
tx_desc_elem->pool_id = pool_id;
tx_desc_elem = tx_desc_elem->next;
count++;
}
return QDF_STATUS_SUCCESS;
}
void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
struct dp_tx_desc_pool_s *tx_desc_pool,
uint8_t pool_id)
{
}


@@ -52,4 +52,28 @@ dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
void dp_tx_comp_get_params_from_hal_desc_li(struct dp_soc *soc,
void *tx_comp_hal_desc,
struct dp_tx_desc_s **r_tx_desc);
/**
* dp_tx_desc_pool_init_li() - Initialize Tx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @pool_desc_num: pool descriptor number
* @pool_id: pool to allocate
*
* Return: QDF_STATUS_SUCCESS - success, others - failure
*/
QDF_STATUS dp_tx_desc_pool_init_li(struct dp_soc *soc,
uint16_t pool_desc_num,
uint8_t pool_id);
/**
* dp_tx_desc_pool_deinit_li() - De-initialize Tx Descriptor pool(s)
* @soc: Handle to DP Soc structure
* @tx_desc_pool: Tx descriptor pool handler
* @pool_id: pool to deinit
*
* Return: None.
*/
void dp_tx_desc_pool_deinit_li(struct dp_soc *soc,
struct dp_tx_desc_pool_s *tx_desc_pool,
uint8_t pool_id);
#endif


@@ -22,6 +22,24 @@
#include "hal_hw_headers.h"
#include "hal_rx.h"
struct hal_hw_cc_config {
uint32_t lut_base_addr_31_0;
uint32_t cc_global_en:1,
page_4k_align:1,
cookie_offset_msb:5,
cookie_page_msb:5,
lut_base_addr_39_32:8,
wbm2sw6_cc_en:1,
wbm2sw5_cc_en:1,
wbm2sw4_cc_en:1,
wbm2sw3_cc_en:1,
wbm2sw2_cc_en:1,
wbm2sw1_cc_en:1,
wbm2sw0_cc_en:1,
wbm2fw_cc_en:1,
reserved:4;
};
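/*
* The bitfields above pack into exactly one 32-bit word:
*   1 + 1 + 5 + 5 + 8 (config fields) + 8 (cc_en bits) + 4 (reserved) = 32
*/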
#define HAL_RX_MSDU_EXT_DESC_INFO_GET(msdu_details_ptr) \
((struct rx_msdu_ext_desc_info *) \
_OFFSET_TO_BYTE_PTR(msdu_details_ptr, \
@@ -104,4 +122,15 @@ void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl,
qdf_dma_addr_t hw_qdesc_paddr,
int pn_type);
/**
* hal_cookie_conversion_reg_cfg_be() - set cookie conversion relevant register
* for REO/WBM
* @soc: HAL soc handle
* @cc_cfg: structure pointer for HW cookie conversion configuration
*
* Return: None
*/
void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
struct hal_hw_cc_config *cc_cfg);
#endif /* _HAL_BE_API_H_ */

View File

@@ -710,6 +710,113 @@ static uint8_t hal_rx_reo_buf_type_get_be(hal_ring_desc_t rx_desc)
return HAL_RX_REO_BUF_TYPE_GET(rx_desc);
}
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
#define HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15 0x8000
#endif
void hal_cookie_conversion_reg_cfg_be(hal_soc_handle_t hal_soc_hdl,
struct hal_hw_cc_config *cc_cfg)
{
uint32_t reg_addr, reg_val = 0;
struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
/* REO CFG */
reg_addr = HWIO_REO_R0_SW_COOKIE_CFG0_ADDR(REO_REG_REG_BASE);
reg_val = cc_cfg->lut_base_addr_31_0;
HAL_REG_WRITE(soc, reg_addr, reg_val);
reg_addr = HWIO_REO_R0_SW_COOKIE_CFG1_ADDR(REO_REG_REG_BASE);
reg_val = 0;
reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
SW_COOKIE_CONVERT_GLOBAL_ENABLE,
cc_cfg->cc_global_en);
reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
SW_COOKIE_CONVERT_ENABLE,
cc_cfg->cc_global_en);
reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
PAGE_ALIGNMENT,
cc_cfg->page_4k_align);
reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
COOKIE_OFFSET_MSB,
cc_cfg->cookie_offset_msb);
reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
COOKIE_PAGE_MSB,
cc_cfg->cookie_page_msb);
reg_val |= HAL_SM(HWIO_REO_R0_SW_COOKIE_CFG1,
CMEM_LUT_BASE_ADDR_39_32,
cc_cfg->lut_base_addr_39_32);
HAL_REG_WRITE(soc, reg_addr, reg_val);
/* WBM CFG */
reg_addr = HWIO_WBM_R0_SW_COOKIE_CFG0_ADDR(WBM_REG_REG_BASE);
reg_val = cc_cfg->lut_base_addr_31_0;
HAL_REG_WRITE(soc, reg_addr, reg_val);
reg_addr = HWIO_WBM_R0_SW_COOKIE_CFG1_ADDR(WBM_REG_REG_BASE);
reg_val = 0;
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
PAGE_ALIGNMENT,
cc_cfg->page_4k_align);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
COOKIE_OFFSET_MSB,
cc_cfg->cookie_offset_msb);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
COOKIE_PAGE_MSB,
cc_cfg->cookie_page_msb);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CFG1,
CMEM_LUT_BASE_ADDR_39_32,
cc_cfg->lut_base_addr_39_32);
HAL_REG_WRITE(soc, reg_addr, reg_val);
/*
* The reset value of WCSS_UMAC_WBM_R0_SW_COOKIE_CONVERT_CFG is 0x1FE;
* rebuild it here from the individual enable bits.
*/
reg_addr = HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG_ADDR(WBM_REG_REG_BASE);
reg_val = 0;
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM_COOKIE_CONV_GLOBAL_ENABLE,
cc_cfg->cc_global_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2SW6_COOKIE_CONVERSION_EN,
cc_cfg->wbm2sw6_cc_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2SW5_COOKIE_CONVERSION_EN,
cc_cfg->wbm2sw5_cc_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2SW4_COOKIE_CONVERSION_EN,
cc_cfg->wbm2sw4_cc_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2SW3_COOKIE_CONVERSION_EN,
cc_cfg->wbm2sw3_cc_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2SW2_COOKIE_CONVERSION_EN,
cc_cfg->wbm2sw2_cc_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2SW1_COOKIE_CONVERSION_EN,
cc_cfg->wbm2sw1_cc_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2SW0_COOKIE_CONVERSION_EN,
cc_cfg->wbm2sw0_cc_en);
reg_val |= HAL_SM(HWIO_WBM_R0_SW_COOKIE_CONVERT_CFG,
WBM2FW_COOKIE_CONVERSION_EN,
cc_cfg->wbm2fw_cc_en);
HAL_REG_WRITE(soc, reg_addr, reg_val);
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/*
* To make WBM indicate whether HW cookie conversion was done for a
* given descriptor, bit 15 of the spare_control field in
* WCSS_UMAC_WBM_R0_MISC_CONTROL (spare_control[15]) must be set.
*/
reg_addr = HWIO_WBM_R0_MISC_CONTROL_ADDR(WBM_REG_REG_BASE);
reg_val = HAL_REG_READ(soc, reg_addr);
reg_val |= HAL_SM(HWIO_WCSS_UMAC_WBM_R0_MISC_CONTROL,
SPARE_CONTROL,
HAL_WBM_MISC_CONTROL_SPARE_CONTROL_FIELD_BIT15);
HAL_REG_WRITE(soc, reg_addr, reg_val);
#endif
}
qdf_export_symbol(hal_cookie_conversion_reg_cfg_be);
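For readers unfamiliar with the accessor used throughout this function, HAL_SM() is the usual shift-and-mask field depositor; conceptually it looks like the sketch below (the authoritative definition lives in the HAL internal headers):

/* Conceptual shape only; each |= above deposits one field into reg_val */
#define EXAMPLE_HAL_SM(_reg, _field, _val) \
	(((_val) << (_reg ## _ ## _field ## _SHFT)) & \
	 (_reg ## _ ## _field ## _BMSK))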
/**
* hal_hw_txrx_default_ops_attach_be() - Attach the default hal ops for
* beryllium chipsets.

View File

@@ -279,6 +279,41 @@ hal_rx_msdu_link_desc_reinject(struct hal_soc *soc, uint64_t pa,
/* TODO */
}
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/* HW sets dword-2 bit 16 to 1 when HW CC is done */
#define HAL_WBM2SW_COMPLETION_RING_RX_CC_DONE_OFFSET 0x8
#define HAL_WBM2SW_COMPLETION_RING_RX_CC_DONE_MASK 0x10000
#define HAL_WBM2SW_COMPLETION_RING_RX_CC_DONE_LSB 0x10
/**
* hal_rx_wbm_get_cookie_convert_done() - Get cookie conversion done flag
* @hal_desc: wbm Rx ring descriptor pointer
*
* This function gets the bit that indicates whether HW cookie
* conversion was done for this entry
*
* Return: 1 - HW cookie conversion done, 0 - not done
*/
static inline uint8_t hal_rx_wbm_get_cookie_convert_done(void *hal_desc)
{
return HAL_RX_GET(hal_desc, HAL_WBM2SW_COMPLETION_RING_RX,
CC_DONE);
}
#endif
/**
* hal_rx_wbm_get_desc_va() - Get Desc virtual address within WBM Desc
* @hal_desc: RX WBM2SW ring descriptor pointer
*
* Return: RX descriptor virtual address
*/
static inline uint64_t hal_rx_wbm_get_desc_va(void *hal_desc)
{
return HAL_RX_GET(hal_desc, WBM2SW_COMPLETION_RING_RX,
BUFFER_VIRT_ADDR_31_0) |
(((uint64_t)HAL_RX_GET(hal_desc, WBM2SW_COMPLETION_RING_RX,
BUFFER_VIRT_ADDR_63_32)) << 32);
}
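Taken together, the two helpers above let the WBM2SW Rx path trust the HW-written VA only when conversion actually happened. A hedged consumer sketch; soc, ring_desc and rx_buf_cookie are assumed from the caller's context:

struct dp_rx_desc *rx_desc;

if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
	/* HW already converted cookie -> VA; read it back directly */
	rx_desc = (struct dp_rx_desc *)(uintptr_t)
			hal_rx_wbm_get_desc_va(ring_desc);
} else {
	/* fall back to the SW cookie lookup */
	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
}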
#define HAL_RX_WBM_FIRST_MSDU_GET(wbm_desc) \
(((*(((uint32_t *)wbm_desc) + \
(WBM_RELEASE_RING_FIRST_MSDU_OFFSET >> 2))) & \
@@ -376,4 +411,18 @@ hal_rx_msdu_desc_info_get_be(void *desc_addr,
msdu_desc_info->msdu_len = HAL_RX_MSDU_PKT_LENGTH_GET(msdu_info);
}
/**
* hal_rx_get_reo_desc_va() - Get Desc virtual address within REO Desc
* @reo_desc: REO2SW ring descriptor pointer
*
* Return: RX descriptor virtual address
*/
static inline uint64_t hal_rx_get_reo_desc_va(void *reo_desc)
{
return HAL_RX_GET(reo_desc, REO_DESTINATION_RING,
BUFFER_VIRT_ADDR_31_0) |
(((uint64_t)HAL_RX_GET(reo_desc, REO_DESTINATION_RING,
BUFFER_VIRT_ADDR_63_32)) << 32);
}
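On the REO destination ring there is no per-entry done bit in this scheme: once the feature is enabled, REO converts every entry, so a fast-path consumer can read the VA directly (sketch, names assumed from context):

rx_desc = (struct dp_rx_desc *)(uintptr_t)hal_rx_get_reo_desc_va(reo_desc);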
#endif /* _HAL_BE_RX_H_ */

View File

@@ -20,6 +20,7 @@
#define _HAL_BE_TX_H_
#include "hal_be_hw_headers.h"
#include "hal_tx.h"
enum hal_be_tx_ret_buf_manager {
HAL_BE_WBM_SW0_BM_ID = 5,
@@ -279,6 +280,45 @@ static inline qdf_dma_addr_t hal_tx_comp_get_paddr(void *hal_desc)
return (qdf_dma_addr_t)(paddr_lo | (((uint64_t)paddr_hi) << 32));
}
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
/* HW sets dword-2 bit 30 to 1 when HW CC is done */
#define HAL_WBM2SW_COMPLETION_RING_TX_CC_DONE_OFFSET 0x8
#define HAL_WBM2SW_COMPLETION_RING_TX_CC_DONE_MASK 0x40000000
#define HAL_WBM2SW_COMPLETION_RING_TX_CC_DONE_LSB 0x1E
/**
* hal_tx_comp_get_cookie_convert_done() - Get cookie conversion done flag
* @hal_desc: completion ring descriptor pointer
*
* This function gets the bit that indicates whether HW cookie
* conversion was done for this entry
*
* Return: 1 - HW cookie conversion done, 0 - not done
*/
static inline uint8_t hal_tx_comp_get_cookie_convert_done(void *hal_desc)
{
return HAL_TX_DESC_GET(hal_desc, HAL_WBM2SW_COMPLETION_RING_TX,
CC_DONE);
}
#endif
/**
* hal_tx_comp_get_desc_va() - Get Desc virtual address within completion Desc
* @hal_desc: completion ring descriptor pointer
*
* This function gets the TX Desc virtual address
*
* Return: TX desc virtual address
*/
static inline uint64_t hal_tx_comp_get_desc_va(void *hal_desc)
{
return HAL_TX_DESC_GET(hal_desc, WBM2SW_COMPLETION_RING_TX,
BUFFER_VIRT_ADDR_31_0) |
(((uint64_t)HAL_TX_DESC_GET(
hal_desc,
WBM2SW_COMPLETION_RING_TX,
BUFFER_VIRT_ADDR_63_32)) << 32);
}
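The Tx completion path mirrors the Rx WBM one: under exception builds, check the per-descriptor done bit before trusting the VA. A hedged sketch; dp_tx_comp_find_by_cookie() is a hypothetical stand-in for the SW cookie lookup:

struct dp_tx_desc_s *tx_desc;

if (hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc)) {
	tx_desc = (struct dp_tx_desc_s *)(uintptr_t)
			hal_tx_comp_get_desc_va(tx_comp_hal_desc);
} else {
	/* hypothetical SW fallback via the 20-bit cookie */
	tx_desc = dp_tx_comp_find_by_cookie(soc, tx_comp_hal_desc);
}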
/*---------------------------------------------------------------------------
* TX BANK register accessor APIs
* ---------------------------------------------------------------------------

View File

@@ -2833,4 +2833,21 @@ uint32_t hal_get_ring_usage(
ring_usage = (100 * num_valid) / srng->num_entries;
return ring_usage;
}
/**
* hal_cmem_write() - write a 32-bit value into CMEM
* @hal_soc_hdl: HAL SOC handle
* @offset: CMEM address
* @value: value to write
*
* Return: None.
*/
static inline void hal_cmem_write(hal_soc_handle_t hal_soc_hdl,
uint32_t offset,
uint32_t value)
{
struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
hal_write32_mb(hal, offset, value);
}
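During cookie-conversion setup this accessor is what programs the page table in CMEM. A hedged sketch of writing one PPT entry; cc_cmem_base, ppt_index and spt_page_paddr are assumed from context, and the entry layout (page frame number of a 4K-aligned SPT page) is an illustration, not chip-accurate:

/* Illustration only: store the 4K-aligned SPT page frame number at
 * the entry's CMEM offset; the actual entry format is an assumption.
 */
hal_cmem_write(hal_soc_hdl,
	       cc_cmem_base + ppt_index * sizeof(uint32_t),
	       (uint32_t)(spt_page_paddr >> 12));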
#endif /* _HAL_APIH_ */

View File

@@ -1123,6 +1123,24 @@
CFG_INI_BOOL("gForceRX64BA", \
false, "Enable/Disable force 64 blockack in RX side")
/*
* <ini>
* ghw_cc_enable - enable HW cookie conversion via register configuration
* @Min: 0
* @Max: 1
* @Default: 1
*
* This ini controls the HW-based conversion of the 20-bit SW cookie
* into the 64-bit Desc virtual address
*
* Usage: Internal
*
* </ini>
*/
#define CFG_DP_HW_CC_ENABLE \
CFG_INI_BOOL("ghw_cc_enable", \
true, "Enable/Disable HW cookie conversion")
#ifdef IPA_OFFLOAD
/*
* <ini>
@@ -1272,6 +1290,7 @@
CFG(CFG_DP_RX_RADIO_1_DEFAULT_REO) \
CFG(CFG_DP_RX_RADIO_2_DEFAULT_REO) \
CFG(CFG_DP_WOW_CHECK_RX_PENDING) \
CFG(CFG_DP_HW_CC_ENABLE) \
CFG(CFG_FORCE_RX_64_BA) \
CFG(CFG_DP_DELAY_MON_REPLENISH) \
CFG_DP_IPA_TX_RING_CFG

View File

@@ -913,6 +913,23 @@ wlan_soc_ipa_cfg_attach(struct cdp_ctrl_objmgr_psoc *psoc,
}
#endif
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
static void
wlan_soc_hw_cc_cfg_attach(struct cdp_ctrl_objmgr_psoc *psoc,
struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx)
{
wlan_cfg_ctx->hw_cc_enabled =
cfg_get(psoc, CFG_DP_HW_CC_ENABLE);
}
#else
static void
wlan_soc_hw_cc_cfg_attach(struct cdp_ctrl_objmgr_psoc *psoc,
struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx)
{
wlan_cfg_ctx->hw_cc_enabled = true;
}
#endif
/**
* wlan_cfg_soc_attach() - Allocate and prepare SoC configuration
* @psoc - Object manager psoc
@@ -1090,6 +1107,7 @@ wlan_cfg_soc_attach(struct cdp_ctrl_objmgr_psoc *psoc)
wlan_cfg_ctx->delay_mon_replenish = cfg_get(psoc,
CFG_DP_DELAY_MON_REPLENISH);
wlan_soc_ipa_cfg_attach(psoc, wlan_cfg_ctx);
wlan_soc_hw_cc_cfg_attach(psoc, wlan_cfg_ctx);
return wlan_cfg_ctx;
}

View File

@@ -323,6 +323,7 @@ struct wlan_cfg_dp_soc_ctxt {
uint32_t ipa_tx_ring_size;
uint32_t ipa_tx_comp_ring_size;
#endif
bool hw_cc_enabled;
};
/**