
Add support for flow control on the convergence branch. Allocate Tx
descriptors dynamically when a vdev comes up. The Tx queue is paused
and unpaused internally in the host based on the stop and start
thresholds. The changes are guarded by the compilation flag
QCA_LL_TX_FLOW_CONTROL_V2.

Change-Id: I0ccb80b0099f39efad52ccd7d47f2709fdee2a93
CRs-Fixed: 2040457

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                              \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1;  \
	/* Calculate page divider to find page number */              \
	sig_bit = 0;                                                  \
	while (num_desc_per_page) {                                   \
		sig_bit++;                                            \
		num_desc_per_page = num_desc_per_page >> 1;           \
	}                                                             \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);           \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
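
/*
 * Worked example (added for illustration; the values are hypothetical,
 * not taken from the driver): if the multi-page allocation ends up with
 * num_desc_per_page = 64, the loop above leaves sig_bit = 7, so
 * offset_filter = 63 and page_divider = 6. A descriptor index then
 * splits into a page lookup plus an offset within that page:
 *
 *	page_id = index >> soc->tx_desc[pool_id].page_divider;
 *	offset  = index & soc->tx_desc[pool_id].offset_filter;
 *
 * This only works because DP_TX_DESC_SIZE() rounds the element size up
 * to a power of two, which keeps num_desc_per_page a power of two.
 */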

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large since, based on
 * the number of clients (1024 clients x 3 radios), the number of
 * outstanding MSDUs stored in TQM queues and LMAC queues will be
 * significantly large.
 *
 * To avoid allocating a large contiguous memory, it uses the
 * multi_page_alloc qdf function to allocate memory in multiple pages.
 * It then iterates through the memory allocated across pages and links
 * each descriptor to the next descriptor, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV
 * level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
			*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);

fail_exit:
	return QDF_STATUS_E_FAULT;
}
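
/*
 * Illustrative note (not part of the original file): the descriptor ID
 * built above packs pool, page and offset into disjoint bit fields,
 *
 *	id = (pool_id << DP_TX_DESC_ID_POOL_OS) |
 *	     (page_id << DP_TX_DESC_ID_PAGE_OS) | offset;
 *
 * so a completion handler can recover the descriptor from the ID alone,
 * roughly as follows (the mask macros are assumed to accompany the
 * shift macros in dp_tx_desc.h):
 *
 *	page_id = (id & page_mask) >> DP_TX_DESC_ID_PAGE_OS;
 *	offset  = id & offset_mask;
 *	desc    = page_base[page_id] + offset * tx_desc_pool->elem_size;
 *
 * with no freelist walk required.
 */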

/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx
 * Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success; error status on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also fairly large: a single element is
	 * 24 bytes, so 2K elements take 48 KB, which is why multi-page
	 * cacheable memory has to be allocated here as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page: point it at the
			 * base of the next coherent page.
			 */
			if (!pages->dma_pages->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link over flow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	TX_DESC_LOCK_CREATE(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

fail_exit:
	return status;
}
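
/*
 * Illustrative note (added; the accessor and HAL helper names below are
 * assumptions for the sketch, they are not defined in this file): each
 * freelist element pairs a cacheable link node with one slot of the
 * coherent DMA allocation, so a caller can consume an element roughly
 * like this:
 *
 *	elem = soc->tx_ext_desc[pool_id].freelist;       // pop
 *	soc->tx_ext_desc[pool_id].freelist = elem->next;
 *	fill_ext_descriptor(elem->vaddr, ...);           // CPU write
 *	// program elem->paddr into the HW ring descriptor
 *
 * No further address translation is needed because vaddr and paddr were
 * paired at pool-alloc time.
 */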

/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	TX_DESC_LOCK_DESTROY(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of elements
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %p pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %p pool_id %d"),
				soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	TSO_DEBUG("Number of free descriptors: %u\n",
		soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	TX_DESC_LOCK_CREATE(&soc->tx_tso_desc[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}
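
/*
 * Hedged sketch of how the freelist above is meant to be consumed
 * (accessor names such as dp_tx_tso_desc_alloc()/dp_tx_tso_desc_free()
 * are assumed to live in dp_tx_desc.h, not in this file):
 *
 *	TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
 *	seg = soc->tx_tso_desc[pool_id].freelist;
 *	if (seg) {
 *		soc->tx_tso_desc[pool_id].freelist = seg->next;
 *		soc->tx_tso_desc[pool_id].num_free--;
 *	}
 *	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
 *
 * Returning a segment reverses the pop under the same lock.
 */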

/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: NONE
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
	TX_DESC_LOCK_DESTROY(&soc->tx_tso_desc[pool_id].lock);
	return;
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %p pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %p pool_id %d"),
				soc, pool_id);
			goto fail;
		}
		soc->tx_tso_num_seg[pool_id].num_free++;

		c_element = c_element->next;
		c_element->next = NULL;
	}

	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	TX_DESC_LOCK_CREATE(&soc->tx_tso_num_seg[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that
 * track the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool_id
 *
 * Return: NONE
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	TX_DESC_LOCK_LOCK(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		/* release the num_seg pool lock taken above */
		TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
	TX_DESC_LOCK_DESTROY(&soc->tx_tso_num_seg[pool_id].lock);
	return;
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	return;
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	return;
}
#endif