qcacmn: TSO fixes

1. Unmap tso common segment only after receiving
   completions for all the tso segments for a given
   skb.
2. Keep track of the number of free TSO descriptors
   available in the pool of TSO descriptors.

Change-Id: I01bdbb9e40b7259f77dbcfeec22c6d8cd0c0a6dd
CRs-Fixed: 2042950
This commit is contained in:
Venkata Sharath Chandra Manchala
2017-04-06 15:30:54 -07:00
committed by snandini
parent ae66cda533
commit 35503cce26
7 changed files with 367 additions and 32 deletions

View File

@@ -77,6 +77,57 @@ static inline void dp_tx_get_queue(struct dp_vdev *vdev,
return; return;
} }
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_release() - Release the tso segment and, on the last
 *                            outstanding segment of the skb, the shared
 *                            tso num-seg descriptor after unmapping
 * @soc: core txrx context
 * @tx_desc: Tx software descriptor holding the TSO segment
 *
 * The num-seg descriptor is shared by every segment of one skb; the
 * common (full) DMA unmap is performed only when the last segment's
 * completion arrives (tso_cmn_num_seg reaches 0).
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
		struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO desc is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
		/* Fix: log under QDF_MODULE_ID_DP like the sibling branch
		 * (was QDF_MODULE_ID_TXRX, inconsistent with this file)
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s %d TSO common info is NULL!",
				__func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
			/* other segments still pending: partial unmap only */
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, false);
		} else {
			/* last segment: full unmap, release common desc */
			tso_num_desc->num_seg.tso_cmn_num_seg--;
			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
			qdf_nbuf_unmap_tso_segment(soc->osdev,
					tx_desc->tso_desc, true);
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
		}
		dp_tx_tso_desc_free(soc,
				tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_desc_release(struct dp_soc *soc,
struct dp_tx_desc_s *tx_desc)
{
return;
}
#endif
/** /**
* dp_tx_desc_release() - Release Tx Descriptor * dp_tx_desc_release() - Release Tx Descriptor
* @tx_desc : Tx Descriptor * @tx_desc : Tx Descriptor
@@ -98,6 +149,9 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
soc = pdev->soc; soc = pdev->soc;
if (tx_desc->frm_type == dp_tx_frm_tso)
dp_tx_tso_desc_release(soc, tx_desc);
if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id); dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
@@ -223,6 +277,56 @@ static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
} }
#endif #endif
#if defined(FEATURE_TSO)
/**
* dp_tx_free_tso_seg() - Loop through the tso segments
* allocated and free them
*
* @soc: soc handle
* @free_seg: list of tso segments
* @msdu_info: msdu descriptor
*
* Return - void
*/
static void dp_tx_free_tso_seg(struct dp_soc *soc,
struct qdf_tso_seg_elem_t *free_seg,
struct dp_tx_msdu_info_s *msdu_info)
{
struct qdf_tso_seg_elem_t *next_seg;
while (free_seg) {
next_seg = free_seg->next;
dp_tx_tso_desc_free(soc,
msdu_info->tx_queue.desc_pool_id,
free_seg);
free_seg = next_seg;
}
}
/**
* dp_tx_free_tso_num_seg() - Loop through the tso num segments
* allocated and free them
*
* @soc: soc handle
* @free_seg: list of tso segments
* @msdu_info: msdu descriptor
* Return - void
*/
static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
struct qdf_tso_num_seg_elem_t *free_seg,
struct dp_tx_msdu_info_s *msdu_info)
{
struct qdf_tso_num_seg_elem_t *next_seg;
while (free_seg) {
next_seg = free_seg->next;
dp_tso_num_seg_free(soc,
msdu_info->tx_queue.desc_pool_id,
free_seg);
free_seg = next_seg;
}
}
/** /**
* dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
* @vdev: virtual device handle * @vdev: virtual device handle
@@ -231,7 +335,6 @@ static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
* *
* Return: QDF_STATUS_SUCCESS success * Return: QDF_STATUS_SUCCESS success
*/ */
#if defined(FEATURE_TSO)
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev, static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info) qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{ {
@@ -239,12 +342,16 @@ static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
int num_seg = qdf_nbuf_get_tso_num_seg(msdu); int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
struct dp_soc *soc = vdev->pdev->soc; struct dp_soc *soc = vdev->pdev->soc;
struct qdf_tso_info_t *tso_info; struct qdf_tso_info_t *tso_info;
struct qdf_tso_num_seg_elem_t *tso_num_seg;
tso_info = &msdu_info->u.tso_info; tso_info = &msdu_info->u.tso_info;
tso_info->curr_seg = NULL; tso_info->curr_seg = NULL;
tso_info->tso_seg_list = NULL; tso_info->tso_seg_list = NULL;
tso_info->num_segs = num_seg; tso_info->num_segs = num_seg;
msdu_info->frm_type = dp_tx_frm_tso; msdu_info->frm_type = dp_tx_frm_tso;
tso_info->tso_num_seg_list = NULL;
TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
while (num_seg) { while (num_seg) {
tso_seg = dp_tx_tso_desc_alloc( tso_seg = dp_tx_tso_desc_alloc(
@@ -254,24 +361,49 @@ static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
tso_info->tso_seg_list = tso_seg; tso_info->tso_seg_list = tso_seg;
num_seg--; num_seg--;
} else { } else {
struct qdf_tso_seg_elem_t *next_seg;
struct qdf_tso_seg_elem_t *free_seg = struct qdf_tso_seg_elem_t *free_seg =
tso_info->tso_seg_list; tso_info->tso_seg_list;
while (free_seg) { dp_tx_free_tso_seg(soc, free_seg, msdu_info);
next_seg = free_seg->next;
dp_tx_tso_desc_free(soc,
msdu_info->tx_queue.desc_pool_id,
free_seg);
free_seg = next_seg;
}
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
} }
TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
tso_num_seg = dp_tso_num_seg_alloc(soc,
msdu_info->tx_queue.desc_pool_id);
if (tso_num_seg) {
tso_num_seg->next = tso_info->tso_num_seg_list;
tso_info->tso_num_seg_list = tso_num_seg;
} else {
/* Bug: free tso_num_seg and tso_seg */
/* Free the already allocated num of segments */
struct qdf_tso_seg_elem_t *free_seg =
tso_info->tso_seg_list;
TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
__func__);
dp_tx_free_tso_seg(soc, free_seg, msdu_info);
return QDF_STATUS_E_NOMEM;
}
msdu_info->num_seg = msdu_info->num_seg =
qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info); qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
msdu_info->num_seg);
if (!(msdu_info->num_seg)) {
dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
msdu_info);
return QDF_STATUS_E_INVAL;
}
tso_info->curr_seg = tso_info->tso_seg_list; tso_info->curr_seg = tso_info->tso_seg_list;
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
@@ -543,6 +675,8 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
tx_desc->vdev = vdev; tx_desc->vdev = vdev;
tx_desc->pdev = pdev; tx_desc->pdev = pdev;
tx_desc->pkt_offset = 0; tx_desc->pkt_offset = 0;
tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
/* Handle scattered frames - TSO/SG/ME */ /* Handle scattered frames - TSO/SG/ME */
/* Allocate and prepare an extension descriptor for scattered frames */ /* Allocate and prepare an extension descriptor for scattered frames */
@@ -960,7 +1094,7 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
nbuf = msdu_info->u.sg_info.curr_seg->nbuf; nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
i = 0; i = 0;
/* Print statement to track i and num_seg */
/* /*
* For each segment (maps to 1 MSDU) , prepare software and hardware * For each segment (maps to 1 MSDU) , prepare software and hardware
* descriptors using information in msdu_info * descriptors using information in msdu_info
@@ -2159,6 +2293,15 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
"%s TSO Desc Pool %d Free descs = %d\n", "%s TSO Desc Pool %d Free descs = %d\n",
__func__, num_pool, num_desc); __func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++)
dp_tx_tso_num_seg_pool_free(soc, i);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
__func__, num_pool, num_desc);
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }
@@ -2226,6 +2369,20 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
"%s TSO Desc Alloc %d, descs = %d\n", "%s TSO Desc Alloc %d, descs = %d\n",
__func__, num_pool, num_desc); __func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"TSO Num of seg Pool alloc %d failed %p\n",
i, soc);
goto fail;
}
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Num of seg pool Alloc %d, descs = %d\n",
__func__, num_pool, num_desc);
/* Initialize descriptors in TCL Rings */ /* Initialize descriptors in TCL Rings */
if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
for (i = 0; i < soc->num_tcl_data_rings; i++) { for (i = 0; i < soc->num_tcl_data_rings; i++) {

View File

@@ -296,6 +296,7 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
struct qdf_tso_seg_elem_t *c_element; struct qdf_tso_seg_elem_t *c_element;
struct qdf_tso_seg_elem_t *temp; struct qdf_tso_seg_elem_t *temp;
soc->tx_tso_desc[pool_id].num_free = 0;
c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t)); c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
if (!c_element) { if (!c_element) {
@@ -306,10 +307,10 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
} }
soc->tx_tso_desc[pool_id].freelist = c_element; soc->tx_tso_desc[pool_id].freelist = c_element;
soc->tx_tso_desc[pool_id].num_free++;
for (i = 0; i < (num_elem - 1); i++) { for (i = 0; i < (num_elem - 1); i++) {
c_element->next = c_element->next =
qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t)); qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
if (!c_element->next) { if (!c_element->next) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("Alloc Failed %p pool_id %d"), FL("Alloc Failed %p pool_id %d"),
@@ -317,10 +318,13 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
goto fail; goto fail;
} }
soc->tx_tso_desc[pool_id].num_free++;
c_element = c_element->next; c_element = c_element->next;
c_element->next = NULL; c_element->next = NULL;
}
}
TSO_DEBUG("Number of free descriptors: %u\n",
soc->tx_tso_desc[pool_id].num_free);
soc->tx_tso_desc[pool_id].pool_size = num_elem; soc->tx_tso_desc[pool_id].pool_size = num_elem;
TX_DESC_LOCK_CREATE(&soc->tx_tso_desc[pool_id].lock); TX_DESC_LOCK_CREATE(&soc->tx_tso_desc[pool_id].lock);
@@ -336,13 +340,6 @@ fail:
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
{
return QDF_STATUS_SUCCESS;
}
#endif
/** /**
* dp_tx_tso_desc_pool_free() - free tx tso descriptor pool * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
@@ -351,7 +348,6 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
* *
* Return: NONE * Return: NONE
*/ */
#if defined(FEATURE_TSO)
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{ {
int i; int i;
@@ -382,9 +378,124 @@ void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
TX_DESC_LOCK_DESTROY(&soc->tx_tso_desc[pool_id].lock); TX_DESC_LOCK_DESTROY(&soc->tx_tso_desc[pool_id].lock);
return; return;
} }
/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on
 *         allocation failure (pool is left empty and consistent)
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %p pool_id %d"),
				soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					FL("Alloc Failed %p pool_id %d"),
					soc, pool_id);
			goto fail;
		}
		soc->tx_tso_num_seg[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}

	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	TX_DESC_LOCK_CREATE(&soc->tx_tso_num_seg[pool_id].lock);
	return QDF_STATUS_SUCCESS;

fail:
	/* Free the partially-built list and reset the pool bookkeeping;
	 * previously freelist was left dangling and num_free stale.
	 */
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	return QDF_STATUS_E_NOMEM;
}
/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that
 *                                 track the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool_id
 *
 * Return: none
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	TX_DESC_LOCK_LOCK(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		/* Fix: release the lock on the error path — it was
		 * previously leaked on this early return.
		 */
		TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
		return;
	}

	/* walk at most pool_size elements; stop early if the list ends */
	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
	TX_DESC_LOCK_DESTROY(&soc->tx_tso_num_seg[pool_id].lock);
	return;
}
#else #else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
{
return QDF_STATUS_SUCCESS;
}
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{ {
return; return;
} }
/* FEATURE_TSO disabled: no num-seg pool to allocate, report success */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
{
return QDF_STATUS_SUCCESS;
}
/* FEATURE_TSO disabled: no num-seg pool exists, nothing to free */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
return;
}
#endif #endif

View File

@@ -52,7 +52,9 @@ QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem); uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
/** /**
* dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
* *
@@ -256,6 +258,7 @@ static inline void dp_tx_ext_desc_free_multiple(struct dp_soc *soc,
return; return;
} }
#if defined(FEATURE_TSO)
/** /**
* dp_tx_tso_desc_alloc() - function to allocate a TSO segment * dp_tx_tso_desc_alloc() - function to allocate a TSO segment
* @soc: device soc instance * @soc: device soc instance
@@ -294,7 +297,6 @@ static inline struct qdf_tso_seg_elem_t *dp_tx_tso_desc_alloc(
* *
* Return: none * Return: none
*/ */
static inline void dp_tx_tso_desc_free(struct dp_soc *soc, static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg) uint8_t pool_id, struct qdf_tso_seg_elem_t *tso_seg)
{ {
@@ -304,6 +306,36 @@ static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
soc->tx_tso_desc[pool_id].num_free++; soc->tx_tso_desc[pool_id].num_free++;
TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock); TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
} }
/**
 * dp_tso_num_seg_alloc() - pop one TSO num-seg element off the pool
 *                          freelist under the pool lock
 * @soc: device soc instance
 * @pool_id: pool id to allocate from
 *
 * Return: element pointer, or NULL when the freelist is empty
 */
static inline
struct qdf_tso_num_seg_elem_t *dp_tso_num_seg_alloc(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *pool = &soc->tx_tso_num_seg[pool_id];
	struct qdf_tso_num_seg_elem_t *elem = NULL;

	TX_DESC_LOCK_LOCK(&pool->lock);
	elem = pool->freelist;
	if (elem) {
		pool->freelist = elem->next;
		pool->num_free--;
	}
	TX_DESC_LOCK_UNLOCK(&pool->lock);

	return elem;
}
/**
 * dp_tso_num_seg_free() - push a TSO num-seg element back onto the pool
 *                         freelist under the pool lock
 * @soc: device soc instance
 * @pool_id: pool id the element belongs to
 * @tso_num_seg: element being returned
 */
static inline
void dp_tso_num_seg_free(struct dp_soc *soc,
		uint8_t pool_id, struct qdf_tso_num_seg_elem_t *tso_num_seg)
{
	struct dp_tx_tso_num_seg_pool_s *pool = &soc->tx_tso_num_seg[pool_id];

	TX_DESC_LOCK_LOCK(&pool->lock);
	tso_num_seg->next = pool->freelist;
	pool->freelist = tso_num_seg;
	pool->num_free++;
	TX_DESC_LOCK_UNLOCK(&pool->lock);
}
#endif
/* /*
* dp_tx_me_alloc_buf() Alloc descriptor from me pool * dp_tx_me_alloc_buf() Alloc descriptor from me pool
* @pdev DP_PDEV handle for datapath * @pdev DP_PDEV handle for datapath

View File

@@ -202,7 +202,7 @@ struct dp_tx_ext_desc_pool_s {
* @id: Descriptor ID * @id: Descriptor ID
* @vdev: vdev over which the packet was transmitted * @vdev: vdev over which the packet was transmitted
* @pdev: Handle to pdev * @pdev: Handle to pdev
* @pool_id: Pool ID - used when releasing the descripto * @pool_id: Pool ID - used when releasing the descriptor
* @flags: Flags to track the state of descriptor and special frame handling * @flags: Flags to track the state of descriptor and special frame handling
* @comp: Pool ID - used when releasing the descriptor * @comp: Pool ID - used when releasing the descriptor
* @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet). * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
@@ -228,6 +228,8 @@ struct dp_tx_desc_s {
uint8_t frm_type; uint8_t frm_type;
uint8_t pkt_offset; uint8_t pkt_offset;
void *me_buffer; void *me_buffer;
void *tso_desc;
void *tso_num_desc;
}; };
/** /**
@@ -244,6 +246,22 @@ struct dp_tx_tso_seg_pool_s {
qdf_spinlock_t lock; qdf_spinlock_t lock;
}; };
/**
 * struct dp_tx_tso_num_seg_pool_s - pool of elements that track the
 *                                   number of TSO segments per skb
 * @num_seg_pool_size: total number of pool elements
 * @num_free: free element count
 * @freelist: first free element pointer
 * @lock: lock for accessing the pool
 */
struct dp_tx_tso_num_seg_pool_s {
uint16_t num_seg_pool_size;
uint16_t num_free;
struct qdf_tso_num_seg_elem_t *freelist;
/* protects freelist and the counters above */
qdf_spinlock_t lock;
};
/** /**
* struct dp_tx_desc_pool_s - Tx Descriptor pool information * struct dp_tx_desc_pool_s - Tx Descriptor pool information
* @elem_size: Size of each descriptor in the pool * @elem_size: Size of each descriptor in the pool
@@ -416,6 +434,9 @@ struct dp_soc {
/* Tx TSO descriptor pool */ /* Tx TSO descriptor pool */
struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS]; struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
/* Tx TSO Num of segments pool */
struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
/* Tx H/W queues lock */ /* Tx H/W queues lock */
qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES]; qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];

View File

@@ -103,9 +103,11 @@
/* Enable flag to print TSO specific prints in datapath */ /* Enable flag to print TSO specific prints in datapath */
#ifdef TSO_DEBUG_LOG_ENABLE #ifdef TSO_DEBUG_LOG_ENABLE
#define TSO_DEBUG(args ...) printk(args) #define TSO_DEBUG(fmt, args ...) \
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_NONE, \
fmt, ## args)
#else #else
#define TSO_DEBUG(args ...) #define TSO_DEBUG(fmt, args ...)
#endif #endif
/** /**

View File

@@ -467,6 +467,14 @@ void __printf(3, 4) qdf_snprintf(char *str_buffer, unsigned int size,
#define QDF_SNPRINTF qdf_snprintf #define QDF_SNPRINTF qdf_snprintf
#else
#define DPTRACE(x)
#define qdf_trace_hex_dump(x, y, z, q)
#endif /* CONFIG_MCL */
#ifdef TSOSEG_DEBUG #ifdef TSOSEG_DEBUG
static inline static inline
int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg, int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg,
@@ -522,12 +530,6 @@ qdf_tso_seg_dbg_zero(struct qdf_tso_seg_elem_t *tsoseg)
}; };
#endif /* TSOSEG_DEBUG */ #endif /* TSOSEG_DEBUG */
#else
#define DPTRACE(x)
#define qdf_trace_hex_dump(x, y, z, q)
#endif /* CONFIG_MCL */
#define ERROR_CODE -1 #define ERROR_CODE -1
#define QDF_MAX_NAME_SIZE 32 #define QDF_MAX_NAME_SIZE 32

View File

@@ -1740,7 +1740,6 @@ static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
tso_info->ethproto); tso_info->ethproto);
return 1; return 1;
} }
tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb)); tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
tso_info->tcphdr = tcp_hdr(skb); tso_info->tcphdr = tcp_hdr(skb);
tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq); tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
@@ -1886,6 +1885,7 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
qdf_print("TSO: error getting common segment info\n"); qdf_print("TSO: error getting common segment info\n");
return 0; return 0;
} }
total_num_seg = tso_info->tso_num_seg_list; total_num_seg = tso_info->tso_num_seg_list;
curr_seg = tso_info->tso_seg_list; curr_seg = tso_info->tso_seg_list;
@@ -1921,6 +1921,7 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
int i = 1; /* tso fragment index */ int i = 1; /* tso fragment index */
uint8_t more_tso_frags = 1; uint8_t more_tso_frags = 1;
curr_seg->seg.num_frags = 0;
tso_info->num_segs++; tso_info->num_segs++;
total_num_seg->num_seg.tso_cmn_num_seg++; total_num_seg->num_seg.tso_cmn_num_seg++;
@@ -1943,6 +1944,7 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
skb_proc = skb_proc - tso_frag_len; skb_proc = skb_proc - tso_frag_len;
/* increment the TCP sequence number */ /* increment the TCP sequence number */
tso_cmn_info.tcp_seq_num += tso_frag_len; tso_cmn_info.tcp_seq_num += tso_frag_len;
curr_seg->seg.tso_frags[i].paddr = tso_frag_paddr; curr_seg->seg.tso_frags[i].paddr = tso_frag_paddr;
TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %p\n", TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %p\n",
@@ -1997,6 +1999,12 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
__func__, __LINE__, skb_frag_len, tso_frag_len, __func__, __LINE__, skb_frag_len, tso_frag_len,
tso_seg_size); tso_seg_size);
if (!(tso_frag_vaddr)) {
TSO_DEBUG("%s: Fragment virtual addr is NULL",
__func__);
return 0;
}
tso_frag_paddr = tso_frag_paddr =
dma_map_single(osdev->dev, dma_map_single(osdev->dev,
tso_frag_vaddr, tso_frag_vaddr,
@@ -2009,6 +2017,8 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
return 0; return 0;
} }
} }
TSO_DEBUG("%s tcp_seq_num: %u", __func__,
curr_seg->seg.tso_flags.tcp_seq_num);
num_seg--; num_seg--;
/* if TCP FIN flag was set, set it in the last segment */ /* if TCP FIN flag was set, set it in the last segment */
if (!num_seg) if (!num_seg)