qcacmn: Add a change to do fast transmit completion

Many of the checks in the Tx completion path handle special cases,
such as when ol stats are enabled, along with some protective debug
checks. Add logic to free the buffer quickly at transmit completion:
if extended stats are not enabled (typically needed only for
enterprise cases), free the buffer on a faster path.
This is controlled through a flag.

Change-Id: I04873b5e3643d8e93e5b248fcaf23504dcb7624f
This commit is contained in:
Radha Krishna Simha Jiguru
2020-04-04 11:05:48 +05:30
Committed by: nshrivas
Parent: 2bfeac4376
Commit: 41c0716617
5 changed files with 195 additions and 64 deletions
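
In short: at enqueue time the descriptor is marked for fast completion
when enhanced stats are disabled (dp_tx_desc_update_fast_comp_flag with
!pdev->enhanced_stats_en), and the completion handler takes a short path
for DP_TX_DESC_FLAG_SIMPLE descriptors that unmaps and frees the buffer
directly with only minimal stats bookkeeping, skipping the full
completion-status processing. The standalone sketch below models that
decision only; it is not the driver code, and every type, helper name,
and flag value in it is a simplified stand-in for the real DP/QDF APIs
shown in the hunks that follow.

    /*
     * Sketch (assumption-laden, not qcacmn code) of the fast Tx
     * completion split introduced by this change.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TX_DESC_FLAG_SIMPLE 0x100   /* illustrative value only */

    struct tx_desc {
        uint32_t flags;
        uint32_t length;
        uint64_t dma_addr;
        void *nbuf;
        struct tx_desc *next;
    };

    /* Enqueue side: allow the fast path only when extended/enhanced
     * stats are not needed for this pdev. */
    static void update_fast_comp_flag(struct tx_desc *desc, bool allow_fast)
    {
        if (allow_fast)
            desc->flags |= TX_DESC_FLAG_SIMPLE;
    }

    /* Completion side: "simple" descriptors are freed directly with
     * minimal stats; others would go through full status processing. */
    static void process_comp_list(struct tx_desc *head)
    {
        struct tx_desc *desc = head;

        while (desc) {
            struct tx_desc *next = desc->next;

            if (desc->flags & TX_DESC_FLAG_SIMPLE)
                printf("fast free: len=%u dma=0x%llx\n",
                       (unsigned)desc->length,
                       (unsigned long long)desc->dma_addr);
            else
                printf("full completion processing: len=%u\n",
                       (unsigned)desc->length);
            desc = next;
        }
    }

    int main(void)
    {
        struct tx_desc d2 = { 0, 256, 0x2000, NULL, NULL };
        struct tx_desc d1 = { 0, 128, 0x1000, NULL, &d2 };

        update_fast_comp_flag(&d1, true);   /* enhanced stats disabled */
        update_fast_comp_flag(&d2, false);  /* enhanced stats enabled */
        process_comp_list(&d1);
        return 0;
    }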


@@ -262,6 +262,8 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
soc = pdev->soc;
dp_tx_outstanding_dec(pdev);
if (tx_desc->frm_type == dp_tx_frm_tso)
dp_tx_tso_desc_release(soc, tx_desc);
@@ -271,8 +273,6 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);
dp_tx_outstanding_dec(pdev);
if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
qdf_atomic_dec(&pdev->num_tx_exception);
@@ -1114,10 +1114,8 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
*tx_exc_metadata)
{
uint8_t type;
uint16_t length;
void *hal_tx_desc;
uint32_t *hal_tx_desc_cached;
qdf_dma_addr_t dma_addr;
/*
* Setting it initialization statically here to avoid
@@ -1144,19 +1142,20 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
hal_tx_desc_cached = (void *) cached_desc;
if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
length = HAL_TX_EXT_DESC_WITH_META_DATA;
tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
type = HAL_TX_BUF_TYPE_EXT_DESC;
dma_addr = tx_desc->msdu_ext_desc->paddr;
tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;
} else {
length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
tx_desc->pkt_offset;
type = HAL_TX_BUF_TYPE_BUFFER;
dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
}
qdf_assert_always(dma_addr);
qdf_assert_always(tx_desc->dma_addr);
hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
dma_addr, bm_id, tx_desc->id,
tx_desc->dma_addr, bm_id, tx_desc->id,
type);
hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
vdev->lmac_id);
@@ -1173,7 +1172,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
(vdev->bss_ast_hash & 0xF));
hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
@@ -1200,7 +1199,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
length, type, (uint64_t)dma_addr,
tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
tx_desc->pkt_offset, tx_desc->id);
hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
@@ -1227,7 +1226,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
status = QDF_STATUS_SUCCESS;
ring_access_fail:
@@ -1703,6 +1702,9 @@ dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
if (msdu_info->exception_fw)
HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
!pdev->enhanced_stats_en);
if (qdf_unlikely(QDF_STATUS_SUCCESS !=
dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
/* Handle failure */
@@ -3458,13 +3460,49 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
{
struct dp_tx_desc_s *desc;
struct dp_tx_desc_s *next;
struct hal_tx_completion_status ts = {0};
struct hal_tx_completion_status ts;
struct dp_peer *peer;
qdf_nbuf_t netbuf;
desc = comp_head;
while (desc) {
if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
struct dp_pdev *pdev = desc->pdev;
peer = dp_peer_find_by_id(soc, desc->peer_id);
if (qdf_likely(peer)) {
/*
* Increment peer statistics
* Minimal statistics update done here
*/
DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
desc->length);
if (desc->tx_status !=
HAL_TX_TQM_RR_FRAME_ACKED)
peer->stats.tx.tx_failed++;
dp_peer_unref_del_find_by_id(peer);
}
qdf_assert(pdev);
dp_tx_outstanding_dec(pdev);
/*
* Calling a QDF WRAPPER here is creating significant
* performance impact so avoided the wrapper call here
*/
next = desc->next;
qdf_mem_unmap_nbytes_single(soc->osdev,
desc->dma_addr,
QDF_DMA_TO_DEVICE,
desc->length);
qdf_nbuf_free(desc->nbuf);
dp_tx_desc_free(soc, desc, desc->pool_id);
desc = next;
continue;
}
hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
peer = dp_peer_find_by_id(soc, ts.peer_id);
dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id);
@@ -3511,12 +3549,35 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
struct cdp_tid_tx_stats *tid_stats = NULL;
struct htt_soc *htt_handle;
qdf_assert(tx_desc->pdev);
/*
* If the descriptor is already freed in vdev_detach,
* continue to next descriptor
*/
if (!tx_desc->vdev && !tx_desc->flags) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO,
"Descriptor freed in vdev_detach %d",
tx_desc->id);
return;
}
pdev = tx_desc->pdev;
vdev = tx_desc->vdev;
soc = pdev->soc;
if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO,
"pdev in down state %d",
tx_desc->id);
dp_tx_comp_free_buf(soc, tx_desc);
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
return;
}
qdf_assert(tx_desc->pdev);
vdev = tx_desc->vdev;
if (!vdev)
return;
tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
@@ -3648,8 +3709,9 @@ uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
more_data:
/* Re-initialize local variables to be re-used */
head_desc = NULL;
tail_desc = NULL;
head_desc = NULL;
tail_desc = NULL;
if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
return 0;
@@ -3664,6 +3726,7 @@ more_data:
/* Find head descriptor from completion ring */
while (qdf_likely(num_avail_for_reap)) {
tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
if (qdf_unlikely(!tx_comp_hal_desc))
break;
@@ -3672,8 +3735,10 @@ more_data:
/* If this buffer was not released by TQM or FW, then it is not
* Tx completion indication, assert */
if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
if (qdf_unlikely(buffer_src !=
HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
(qdf_unlikely(buffer_src !=
HAL_TX_COMP_RELEASE_SOURCE_FW))) {
uint8_t wbm_internal_error;
dp_err_rl(
@@ -3725,35 +3790,6 @@ more_data:
(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
DP_TX_DESC_ID_OFFSET_OS);
/*
* If the descriptor is already freed in vdev_detach,
* continue to next descriptor
*/
if (!tx_desc->vdev && !tx_desc->flags) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO,
"Descriptor freed in vdev_detach %d",
tx_desc_id);
num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
count++;
continue;
}
if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO,
"pdev in down state %d",
tx_desc_id);
num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
count++;
dp_tx_comp_free_buf(soc, tx_desc);
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
continue;
}
/*
* If the release source is FW, process the HTT status
*/
@@ -3765,6 +3801,43 @@ more_data:
dp_tx_process_htt_completion(tx_desc,
htt_tx_status, ring_id);
} else {
/*
* If the fast completion mode is enabled extended
* metadata from descriptor is not copied
*/
if (qdf_likely(tx_desc->flags &
DP_TX_DESC_FLAG_SIMPLE)) {
tx_desc->peer_id =
hal_tx_comp_get_peer_id(tx_comp_hal_desc);
tx_desc->tx_status =
hal_tx_comp_get_tx_status(tx_comp_hal_desc);
goto add_to_pool;
}
/*
* If the descriptor is already freed in vdev_detach,
* continue to next descriptor
*/
if (qdf_unlikely(!tx_desc->vdev) &&
qdf_unlikely(!tx_desc->flags)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO,
"Descriptor freed in vdev_detach %d",
tx_desc_id);
continue;
}
if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_INFO,
"pdev in down state %d",
tx_desc_id);
dp_tx_comp_free_buf(soc, tx_desc);
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
goto next_desc;
}
/* Pool id is not matching. Error */
if (tx_desc->pool_id != pool_id) {
QDF_TRACE(QDF_MODULE_ID_DP,
@@ -3778,12 +3851,18 @@ more_data:
if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_FATAL,
"Txdesc invalid, flgs = %x,id = %d",
tx_desc->flags, tx_desc_id);
QDF_TRACE_LEVEL_FATAL,
"Txdesc invalid, flgs = %x,id = %d",
tx_desc->flags, tx_desc_id);
qdf_assert_always(0);
}
/* Collect hw completion contents */
hal_tx_comp_desc_sync(tx_comp_hal_desc,
&tx_desc->comp, 1);
add_to_pool:
DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
/* First ring descriptor on the cycle */
if (!head_desc) {
head_desc = tx_desc;
@@ -3793,15 +3872,8 @@ more_data:
tail_desc->next = tx_desc;
tx_desc->next = NULL;
tail_desc = tx_desc;
DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
/* Collect hw completion contents */
hal_tx_comp_desc_sync(tx_comp_hal_desc,
&tx_desc->comp, 1);
}
next_desc:
num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
/*