qcacmn: Enable TSO support for QCA8074/6290 DP

Add TSO support for transmit datapath of QCA8074/6290

Change-Id: Id7254859372aa43e3ed16f80c0240d1b78ae63cc
CRs-Fixed: 2004658
Author: Ishank Jain <2017-03-15 22:22:47 +05:30>
Committed by: Sandeep Puligilla
parent 07ec569895
commit 5122f8fc48
6 changed files with 475 additions and 22 deletions
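For orientation before the diff: a minimal, self-contained model (illustrative names only, not driver code) of the allocation pattern the new dp_tx_prepare_tso() uses. One freelist element is taken per TSO segment; on a mid-way failure, every element taken so far is returned to the freelist before reporting failure, the analogue of QDF_STATUS_E_NOMEM below.

#include <stdio.h>

struct seg_elem {
	struct seg_elem *next;
};

/* toy freelist standing in for soc->tx_tso_desc[pool_id].freelist */
static struct seg_elem *freelist;

static struct seg_elem *seg_alloc(void)
{
	struct seg_elem *e = freelist;

	if (e)
		freelist = e->next;
	return e;
}

static void seg_free(struct seg_elem *e)
{
	e->next = freelist;
	freelist = e;
}

/* mirrors the while (num_seg) loop: all-or-nothing acquisition */
static int prepare_segs(int num_seg, struct seg_elem **out_list)
{
	struct seg_elem *list = NULL;

	while (num_seg) {
		struct seg_elem *e = seg_alloc();

		if (!e) {
			/* roll back the partial allocation */
			while (list) {
				struct seg_elem *next = list->next;

				seg_free(list);
				list = next;
			}
			return -1;
		}
		e->next = list;
		list = e;
		num_seg--;
	}
	*out_list = list;
	return 0;
}

int main(void)
{
	struct seg_elem pool[4], *list;
	int i;

	/* seed the freelist with four elements */
	for (i = 0; i < 4; i++)
		seg_free(&pool[i]);

	printf("3 segments: %s\n", prepare_segs(3, &list) ? "fail" : "ok");
	printf("9 segments: %s\n", prepare_segs(9, &list) ? "fail" : "ok");
	return 0;
}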


@@ -195,6 +195,120 @@ static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
return htt_desc_size;
}
/**
* dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
* @tso_seg: TSO segment to process
* @ext_desc: Pointer to MSDU extension descriptor
*
* Return: void
*/
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
void *ext_desc)
{
uint8_t num_frag;
uint32_t *buf_ptr;
uint32_t tso_flags;
/*
* Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
* tcp_flag_mask
*
* Checksum enable flags are set in TCL descriptor and not in Extension
* Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
*/
tso_flags = *(uint32_t *) &tso_seg->tso_flags;
hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
tso_seg->tso_flags.ip_len);
hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
uint32_t lo = 0;
uint32_t hi = 0;
qdf_dmaaddr_to_32s(
tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
tso_seg->tso_frags[num_frag].length);
}
return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
void *ext_desc)
{
return;
}
#endif
/**
* dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
* @vdev: virtual device handle
* @msdu: network buffer
* @msdu_info: meta data associated with the msdu
*
* Return: QDF_STATUS_SUCCESS on success
*/
#if defined(FEATURE_TSO)
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
struct qdf_tso_seg_elem_t *tso_seg;
int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
struct dp_soc *soc = vdev->pdev->soc;
struct qdf_tso_info_t *tso_info;
tso_info = &msdu_info->u.tso_info;
tso_info->curr_seg = NULL;
tso_info->tso_seg_list = NULL;
tso_info->num_segs = num_seg;
msdu_info->frm_type = dp_tx_frm_tso;
while (num_seg) {
tso_seg = dp_tx_tso_desc_alloc(
soc, msdu_info->tx_queue.desc_pool_id);
if (tso_seg) {
tso_seg->next = tso_info->tso_seg_list;
tso_info->tso_seg_list = tso_seg;
num_seg--;
} else {
struct qdf_tso_seg_elem_t *next_seg;
struct qdf_tso_seg_elem_t *free_seg =
tso_info->tso_seg_list;
while (free_seg) {
next_seg = free_seg->next;
dp_tx_tso_desc_free(soc,
msdu_info->tx_queue.desc_pool_id,
free_seg);
free_seg = next_seg;
}
return QDF_STATUS_E_NOMEM;
}
}
msdu_info->num_seg =
qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
tso_info->curr_seg = tso_info->tso_seg_list;
return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
return QDF_STATUS_E_NOMEM;
}
#endif
/**
* dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
* @vdev: DP Vdev handle
@@ -216,6 +330,7 @@ struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
/* Allocate an extension descriptor */
msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
if (!msdu_ext_desc)
return NULL;
@@ -239,18 +354,24 @@ struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
seg_info->frags[i].len);
}
hal_tx_ext_desc_sync(&cached_ext_desc[0],
msdu_ext_desc->vaddr);
break;
case dp_tx_frm_tso:
/* Todo add support for TSO */
dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
&cached_ext_desc[0]);
break;
default:
break;
}
QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
hal_tx_ext_desc_sync(&cached_ext_desc[0],
msdu_ext_desc->vaddr);
return msdu_ext_desc;
}
@@ -362,7 +483,7 @@ failure:
}
/**
* dp_tx_desc_prepare- Allocate and prepare Tx descriptor for multisegment frame
* dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
* @vdev: DP vdev handle
* @nbuf: skb
* @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
@@ -395,6 +516,7 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
/* Allocate software Tx descriptor */
tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
if (!tx_desc)
return NULL;
@@ -564,8 +686,11 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
if (qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
|| qdf_nbuf_is_tso(tx_desc->nbuf)) {
hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
}
if (tid != HTT_TX_EXT_TID_INVALID)
hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
@@ -776,8 +901,17 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
if (msdu_info->u.tso_info.curr_seg->next) {
msdu_info->u.tso_info.curr_seg =
msdu_info->u.tso_info.curr_seg->next;
/*
* If this is a jumbo nbuf, then increment the number of
* nbuf users for each additional segment of the msdu.
* This will ensure that the skb is freed only after
* receiving tx completion for all segments of an nbuf
*/
qdf_nbuf_inc_users(nbuf);
/* Check with MCL if this is needed */
/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
}
}
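The comment above states the skb lifetime rule that qdf_nbuf_inc_users() enforces; a tiny stand-alone model of that rule, with toy names rather than the qdf implementation:

#include <stdio.h>

struct toy_nbuf {
	int users;
};

/* stands in for qdf_nbuf_inc_users() */
static void toy_inc_users(struct toy_nbuf *n)
{
	n->users++;
}

/* each segment's tx completion drops one reference */
static void toy_tx_complete(struct toy_nbuf *n)
{
	if (--n->users == 0)
		printf("last segment completed: free the skb\n");
}

int main(void)
{
	struct toy_nbuf nbuf = { .users = 1 };	/* reference held by the send path */
	int extra_segs = 2, i;

	for (i = 0; i < extra_segs; i++)
		toy_inc_users(&nbuf);	/* one per additional TSO segment */

	for (i = 0; i < extra_segs + 1; i++)
		toy_tx_complete(&nbuf);
	return 0;
}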
@@ -1018,12 +1152,18 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
* SW and HW descriptors.
*/
if (qdf_nbuf_is_tso(nbuf)) {
/* dp_tx_prepare_tso(vdev, nbuf, &seg_info, &msdu_info); */
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s TSO frame %p\n", __func__, vdev);
DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
qdf_nbuf_len(nbuf));
if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s tso_prepare fail vdev_id:%d\n",
__func__, vdev->vdev_id);
return nbuf;
}
goto send_multiple;
}
@@ -1746,6 +1886,14 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
"%s MSDU Ext Desc Pool %d Free descs = %d\n",
__func__, num_pool, num_ext_desc);
for (i = 0; i < num_pool; i++) {
dp_tx_tso_desc_pool_free(soc, i);
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Desc Pool %d Free descs = %d\n",
__func__, num_pool, num_desc);
return QDF_STATUS_SUCCESS;
}
@@ -1799,6 +1947,20 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
"%s MSDU Ext Desc Alloc %d, descs = %d\n",
__func__, num_pool, num_ext_desc);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"TSO Desc Pool alloc %d failed %p\n",
i, soc);
goto fail;
}
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Desc Alloc %d, descs = %d\n",
__func__, num_pool, num_desc);
/* Initialize descriptors in TCL Rings */
if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
for (i = 0; i < soc->num_tcl_data_rings; i++) {


@@ -163,11 +163,6 @@ static inline QDF_STATUS dp_tx_flow_control(struct dp_vdev *vdev)
{
return QDF_STATUS_SUCCESS;
}
static inline QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
{
return QDF_STATUS_SUCCESS;
}
static inline qdf_nbuf_t dp_tx_prepare_me(struct dp_vdev *vdev,
qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
{


@@ -240,6 +240,7 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
break;
}
soc->tx_ext_desc[pool_id].num_free = num_elem;
TX_DESC_LOCK_CREATE(&soc->tx_ext_desc[pool_id].lock);
return QDF_STATUS_SUCCESS;
@@ -279,3 +280,111 @@ QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
return QDF_STATUS_SUCCESS;
}
/**
* dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
* @soc: Handle to DP SoC structure
* @pool_id: tso descriptor pool id
* @num_elem: number of elements in the pool
*
* Return: QDF_STATUS_SUCCESS
*/
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
{
int i;
struct qdf_tso_seg_elem_t *c_element;
struct qdf_tso_seg_elem_t *temp;
c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
if (!c_element) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("Alloc Failed %p pool_id %d"),
soc, pool_id);
return QDF_STATUS_E_NOMEM;
}
soc->tx_tso_desc[pool_id].freelist = c_element;
for (i = 0; i < (num_elem - 1); i++) {
c_element->next =
qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
if (!c_element->next) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("Alloc Failed %p pool_id %d"),
soc, pool_id);
goto fail;
}
c_element = c_element->next;
c_element->next = NULL;
}
soc->tx_tso_desc[pool_id].pool_size = num_elem;
soc->tx_tso_desc[pool_id].num_free = num_elem;
TX_DESC_LOCK_CREATE(&soc->tx_tso_desc[pool_id].lock);
return QDF_STATUS_SUCCESS;
fail:
c_element = soc->tx_tso_desc[pool_id].freelist;
while (c_element) {
temp = c_element->next;
qdf_mem_free(c_element);
c_element = temp;
}
return QDF_STATUS_E_NOMEM;
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
{
return QDF_STATUS_SUCCESS;
}
#endif
/**
* dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
* @soc: Handle to DP SoC structure
* @pool_id: tso descriptor pool id
*
* Return: none
*/
#if defined(FEATURE_TSO)
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
int i;
struct qdf_tso_seg_elem_t *c_element;
struct qdf_tso_seg_elem_t *temp;
TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
c_element = soc->tx_tso_desc[pool_id].freelist;
if (!c_element) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Desc Pool Corrupt %d", pool_id);
return;
}
for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
temp = c_element->next;
qdf_mem_free(c_element);
c_element = temp;
if (!c_element)
break;
}
soc->tx_tso_desc[pool_id].freelist = NULL;
soc->tx_tso_desc[pool_id].num_free = 0;
soc->tx_tso_desc[pool_id].pool_size = 0;
TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
TX_DESC_LOCK_DESTROY(&soc->tx_tso_desc[pool_id].lock);
return;
}
#else
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
return;
}
#endif


@@ -48,6 +48,9 @@ QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
/**
* dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
@@ -74,6 +77,7 @@ static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
soc->tx_desc[desc_pool_id].freelist =
soc->tx_desc[desc_pool_id].freelist->next;
soc->tx_desc[desc_pool_id].num_allocated++;
soc->tx_desc[desc_pool_id].num_free--;
}
DP_STATS_INC(soc, tx.desc_in_use, 1);
@@ -83,6 +87,53 @@ static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
return tx_desc;
}
/**
* dp_tx_desc_alloc_multiple() - Allocate a batch of software Tx descriptors
* from the given pool
* @soc: Handle to DP SoC structure
* @desc_pool_id: pool id from which the descriptors are to be allocated
* @num_requested: number of descriptors requested
*
* Allocates multiple Tx descriptors and links them into a list
*
* Return: h_desc, pointer to the first descriptor of the linked batch
*/
static inline struct dp_tx_desc_s *dp_tx_desc_alloc_multiple(
struct dp_soc *soc, uint8_t desc_pool_id, uint8_t num_requested)
{
struct dp_tx_desc_s *c_desc = NULL, *h_desc = NULL;
uint8_t count;
TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
if ((num_requested == 0) ||
(soc->tx_desc[desc_pool_id].num_free < num_requested)) {
TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s, No Free Desc: Available(%d) num_requested(%d)",
__func__, soc->tx_desc[desc_pool_id].num_free,
num_requested);
return NULL;
}
h_desc = soc->tx_desc[desc_pool_id].freelist;
/* h_desc should never be NULL since num_free >= num_requested */
qdf_assert_always(h_desc);
c_desc = h_desc;
c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
for (count = 0; count < (num_requested - 1); count++) {
c_desc = c_desc->next;
c_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
}
soc->tx_desc[desc_pool_id].num_free -= num_requested;
soc->tx_desc[desc_pool_id].num_allocated += num_requested;
soc->tx_desc[desc_pool_id].freelist = c_desc->next;
c_desc->next = NULL;
TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
return h_desc;
}
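No caller of dp_tx_desc_alloc_multiple() is added in this change; the sketch below (example_use_batch is hypothetical, not part of the code) shows how the returned NULL-terminated batch would be walked:

static inline void example_use_batch(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_s *desc = dp_tx_desc_alloc_multiple(soc, pool_id, 4);

	while (desc) {
		struct dp_tx_desc_s *next = desc->next;

		/* ... program desc and hand it to hardware here ... */
		dp_tx_desc_free(soc, desc, pool_id);	/* or defer until tx completion */
		desc = next;
	}
}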
/**
* dp_tx_desc_free() - Free a tx descriptor and attach it to free list
@@ -101,6 +152,9 @@ dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
soc->tx_desc[desc_pool_id].freelist = tx_desc;
DP_STATS_DEC(soc, tx.desc_in_use, 1);
soc->tx_desc[desc_pool_id].num_allocated--;
soc->tx_desc[desc_pool_id].num_free++;
TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
@@ -159,4 +213,94 @@ static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
TX_DESC_LOCK_UNLOCK(&soc->tx_ext_desc[desc_pool_id].lock);
return;
}
/**
* dp_tx_ext_desc_free_multiple() - Free multiple tx extension descriptors and
* attach them to the free list
* @soc: Handle to DP SoC structure
* @desc_pool_id: pool id to which the descriptors are returned
* @elem: first descriptor of the list to be freed
* @num_free: number of descriptors to be freed
*
* Return: none
*/
static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id,
uint8_t num_free)
{
struct dp_tx_ext_desc_elem_s *head, *tail, *c_elem;
uint8_t freed = num_free;
head = elem;
/* caller should always guarantee a list of at least num_free nodes */
qdf_assert_always(head);
c_elem = head;
tail = head;
while (c_elem && freed) {
tail = c_elem;
c_elem = c_elem->next;
freed--;
}
/* caller should always guarantee a list of at least num_free nodes */
qdf_assert_always(tail);
TX_DESC_LOCK_LOCK(&soc->tx_ext_desc[desc_pool_id].lock);
tail->next = soc->tx_ext_desc[desc_pool_id].freelist;
soc->tx_ext_desc[desc_pool_id].freelist = head;
soc->tx_ext_desc[desc_pool_id].num_free += num_free;
TX_DESC_LOCK_UNLOCK(&soc->tx_ext_desc[desc_pool_id].lock);
return;
}
/**
* dp_tx_tso_desc_alloc() - function to allocate a TSO segment
* @soc: device soc instance
* @pool_id: pool id from which the TSO descriptor is to be allocated
*
* Allocates a TSO segment element from the free list held in
* the soc
*
* Return: tso_seg, tso segment memory pointer
*/
static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
struct dp_soc *soc, uint8_t pool_id)
{
struct qdf_tso_seg_elem_t *tso_seg = NULL;
TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
if (soc->tx_tso_desc[pool_id].freelist) {
soc->tx_tso_desc[pool_id].num_free--;
tso_seg = soc->tx_tso_desc[pool_id].freelist;
soc->tx_tso_desc[pool_id].freelist =
soc->tx_tso_desc[pool_id].freelist->next;
}
TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
return tso_seg;
}
/**
* dp_tx_tso_desc_free() - function to free a TSO segment
* @soc: device soc instance
* @pool_id: pool id to which the TSO descriptor is returned
* @tso_seg: tso segment memory pointer
*
* Returns a TSO segment element to the free list held in the soc
*
* Return: none
*/
static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{
TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
tso_seg->next = soc->tx_tso_desc[pool_id].freelist;
soc->tx_tso_desc[pool_id].freelist = tso_seg;
soc->tx_tso_desc[pool_id].num_free++;
TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
}
#endif /* DP_TX_DESC_H */


@@ -115,6 +115,7 @@ struct dp_tx_ext_desc_elem_s {
* struct dp_tx_ext_desc_s - Tx Extension Descriptor Pool
* @elem_count: Number of descriptors in the pool
* @elem_size: Size of each descriptor
* @num_free: Number of free descriptors
* @msdu_ext_desc: MSDU extension descriptor
* @desc_pages: multiple page allocation information for actual descriptors
* @link_elem_size: size of the link descriptor in cacheable memory used for
@@ -124,6 +125,7 @@ struct dp_tx_ext_desc_elem_s {
struct dp_tx_ext_desc_pool_s {
uint16_t elem_count;
int elem_size;
uint16_t num_free;
struct qdf_mem_multi_page_t desc_pages;
int link_elem_size;
struct qdf_mem_multi_page_t desc_link_pages;
@@ -168,31 +170,41 @@ struct dp_tx_desc_s {
void *me_buffer;
};
/**
* struct dp_tx_tso_seg_pool_s - TSO segment pool information
* @pool_size: total number of pool elements
* @num_free: free element count
* @freelist: first free element pointer
* @lock: lock for accessing the pool
*/
struct dp_tx_tso_seg_pool_s {
uint16_t pool_size;
uint16_t num_free;
struct qdf_tso_seg_elem_t *freelist;
qdf_spinlock_t lock;
};
/**
* struct dp_tx_desc_pool_s - Tx Descriptor pool information
* @desc_reserved_size: Size to be reserved for housekeeping info
* in allocated memory for each descriptor
* @page_divider: Number of bits to shift to get page number from descriptor ID
* @offset_filter: Bit mask to get offset from descriptor ID
* @num_allocated: Number of allocated (in use) descriptors in the pool
* @elem_size: Size of each descriptor in the pool
* @elem_count: Total number of descriptors in the pool
* @num_allocated: Number of used descriptors
* @num_free: Number of free descriptors
* @freelist: Chain of free descriptors
* @desc_pages: multiple page allocation information
* @desc_pages: multiple page allocation information for actual descriptors
* @lock: Lock for descriptor allocation/free from/to the pool
*/
struct dp_tx_desc_pool_s {
uint16_t desc_reserved_size;
uint8_t page_divider;
uint32_t offset_filter;
uint32_t num_allocated;
uint16_t elem_size;
uint16_t elem_count;
uint32_t num_allocated;
uint32_t num_free;
struct dp_tx_desc_s *freelist;
struct qdf_mem_multi_page_t desc_pages;
qdf_spinlock_t lock;
};
struct dp_srng {
void *hal_srng;
void *base_vaddr_unaligned;
@@ -320,6 +332,9 @@ struct dp_soc {
/* Tx MSDU Extension descriptor pool */
struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
/* Tx TSO descriptor pool */
struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
/* Tx H/W queues lock */
qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];


@@ -525,6 +525,20 @@ static inline void hal_tx_ext_desc_set_tso_enable(void *desc,
HAL_TX_SM(TX_MSDU_EXTENSION_0, TSO_ENABLE, tso_en);
}
/**
* hal_tx_ext_desc_set_tso_flags() - Set TSO Flags
* @desc: Handle to Tx MSDU Extension Descriptor
* @tso_flags: 32-bit word with all TSO flags consolidated
*
* Return: none
*/
static inline void hal_tx_ext_desc_set_tso_flags(void *desc,
uint32_t tso_flags)
{
HAL_SET_FLD_OFFSET(desc, TX_MSDU_EXTENSION_0, TSO_ENABLE, 0) =
tso_flags;
}
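The consolidated word this setter receives is produced in dp_tx.c by type-punning the per-segment flags structure (tso_flags = *(uint32_t *)&tso_seg->tso_flags). A self-contained illustration of that consolidation, using a stand-in bit-field layout rather than the real qdf_tso_flags_t:

#include <stdint.h>
#include <stdio.h>

/* stand-in layout; the real field order lives in qdf_tso_flags_t */
struct toy_tso_flags {
	uint32_t tso_enable:1,
		 tcp_flags:9,	/* NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN */
		 tcp_flag_mask:9,
		 reserved:13;
};

int main(void)
{
	struct toy_tso_flags f = { .tso_enable = 1, .tcp_flags = 0x18 };
	uint32_t word;

	/* the same consolidation done before calling
	 * hal_tx_ext_desc_set_tso_flags()
	 */
	word = *(uint32_t *)&f;
	printf("consolidated tso flags word: 0x%08x\n", (unsigned int)word);
	return 0;
}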
/**
* hal_tx_ext_desc_set_checksum_en() - Enable HW Checksum offload
* @desc: Handle to Tx MSDU Extension Descriptor
@@ -592,6 +606,20 @@ static inline void hal_tx_ext_desc_set_tcp_seq(void *desc,
((HAL_TX_SM(TX_MSDU_EXTENSION_2, TCP_SEQ_NUMBER, seq_num)));
}
/**
* hal_tx_ext_desc_set_ip_id() - Set IP Identification field
* @desc: Handle to Tx MSDU Extension Descriptor
* @id: IP Id field for the msdu, if tso is enabled
*
* Return: none
*/
static inline void hal_tx_ext_desc_set_ip_id(void *desc,
uint16_t id)
{
HAL_SET_FLD(desc, TX_MSDU_EXTENSION_3, IP_IDENTIFICATION) |=
((HAL_TX_SM(TX_MSDU_EXTENSION_3, IP_IDENTIFICATION, id)));
}
/**
* hal_tx_ext_desc_set_buffer() - Set Buffer Pointer and Length for a fragment
* @desc: Handle to Tx MSDU Extension Descriptor