qcacmn: Add TX completion logic for WCN6450

In the case of WCN6450, the WBM HW block is removed from the UMAC,
so TX completions come via HTT messages instead. Add logic to handle
the HTT TX completion messages from the firmware.

The changes are specific to WCN6450 and hence the logic is
implemented in the arch-specific code.
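
For illustration only (not part of this change), the sketch below shows the
message shape the new handler walks: a one-word indication header carrying the
MSDU count, followed by one 8-word MSDU info record per completed frame. The
GET_* macros and field masks here are made-up placeholders standing in for the
real HTT accessors (HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET() and friends).

#include <stdint.h>
#include <stdio.h>

/* Word counts mirror HTT_SOFT_UMAC_TX_COMPL_IND_SIZE / 4 and
 * HTT_TX_MSDU_INFO_SIZE / 4; the masks below are placeholders.
 */
#define IND_HDR_WORDS        1
#define MSDU_INFO_WORDS      8
#define GET_MSDU_COUNT(w)    ((w) & 0xffff)
#define GET_SW_COOKIE(w)     ((w) & 0xfffff)
#define GET_REL_REASON(w)    ((w) & 0xf)

void walk_tx_compl_ind(const uint32_t *msg, uint32_t msg_words)
{
        uint32_t num_msdus = GET_MSDU_COUNT(msg[0]);
        const uint32_t *info = msg + IND_HDR_WORDS;
        uint32_t i;

        /* Reject an MSDU count that would overrun the message */
        if (IND_HDR_WORDS + num_msdus * MSDU_INFO_WORDS > msg_words)
                return;

        for (i = 0; i < num_msdus; i++, info += MSDU_INFO_WORDS) {
                /* The sw cookie locates the SW TX descriptor; the release
                 * reason maps to the completion status reported upward.
                 */
                uint32_t sw_cookie = GET_SW_COOKIE(info[1]);
                uint32_t reason = GET_REL_REASON(info[3]);

                printf("msdu %u: cookie 0x%x reason %u\n", i, sw_cookie, reason);
        }
}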

Change-Id: I447020354ce26e8948e4b49648c434fb2ed302cd
CRs-Fixed: 3381814
Author: Manikanta Pubbisetty
Date: 2022-10-10 15:58:20 +05:30
Committed by: Madan Koyyalamudi
commit b5f74912c1 (parent 21ec2f5d16)
6 changed files with 175 additions and 1 deletion


@@ -1865,6 +1865,14 @@ static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
                tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
                break;
        case CDP_ARCH_TYPE_RH:
        {
                uint32_t *msg_word = (uint32_t *)htt_desc;

                tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(
                                                *(msg_word + 3));
        }
        break;
        default:
                dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
                QDF_BUG(0);


@@ -624,6 +624,10 @@ uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
                            uint32_t quota);
#endif

void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
                             struct dp_tx_desc_s *comp_head, uint8_t ring_id);

QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);


@@ -285,7 +285,21 @@ dp_htt_t2h_msg_handler_fast(void *context, qdf_nbuf_t *cmpl_msdus,
                                       msdu_cnt);
                break;
        }
        case HTT_T2H_MSG_TYPE_SOFT_UMAC_TX_COMPL_IND:
        {
                uint32_t num_msdus;

                num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
                if ((num_msdus * HTT_TX_MSDU_INFO_SIZE +
                     HTT_SOFT_UMAC_TX_COMPL_IND_SIZE) > msg_len) {
                        dp_htt_err("Invalid msdu count in tx compl indication %d",
                                   num_msdus);
                        break;
                }

                dp_tx_compl_handler_rh(soc->dp_soc, htt_t2h_msg);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_PN_IND:
        {
                /* TODO check and add PN IND handling */


@@ -26,6 +26,12 @@
#include "qdf_mem.h" #include "qdf_mem.h"
#include "cdp_txrx_cmn_struct.h" #include "cdp_txrx_cmn_struct.h"
/* sizeof(struct htt_t2h_soft_umac_tx_compl_ind) */
#define HTT_SOFT_UMAC_TX_COMPL_IND_SIZE (1 * 4) //in bytes
/* sizeof(struct htt_t2h_tx_msdu_info) */
#define HTT_TX_MSDU_INFO_SIZE (8 * 4) //in bytes
/* /*
* dp_htt_h2t_rx_ring_rfs_cfg() - RFS config for RX DATA indication * dp_htt_h2t_rx_ring_rfs_cfg() - RFS config for RX DATA indication
* @htt_soc: Opaque htt SOC handle * @htt_soc: Opaque htt SOC handle


@@ -28,6 +28,7 @@
#include "dp_rh.h" #include "dp_rh.h"
#include <ce_api.h> #include <ce_api.h>
#include <ce_internal.h> #include <ce_internal.h>
#include "dp_rh_htt.h"
extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE]; extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
@@ -80,6 +81,40 @@ void dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
{ {
} }
/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
        uint8_t pool_id;
        struct dp_tx_desc_s *tx_desc;

        pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
                        DP_TX_DESC_ID_POOL_OS;

        /* Find the Tx descriptor from the page/offset encoded in the cookie */
        tx_desc = dp_tx_desc_find(soc, pool_id,
                                  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
                                        DP_TX_DESC_ID_PAGE_OS,
                                  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
                                        DP_TX_DESC_ID_OFFSET_OS);
        /* Pool id from the cookie does not match the descriptor's pool. Error */
        if (tx_desc && tx_desc->pool_id != pool_id) {
                dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
                                 pool_id, tx_desc->pool_id);
                qdf_assert_always(0);
        }

        return tx_desc;
}

void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
                                     struct dp_tx_desc_s *tx_desc,
                                     uint8_t *status,

@@ -524,3 +559,101 @@ void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
        dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
        dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}

void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_s *head_desc = NULL;
        struct dp_tx_desc_s *tail_desc = NULL;
        uint32_t sw_cookie;
        uint32_t num_msdus;
        uint32_t *msg_word;
        uint8_t ring_id;
        uint8_t tx_status;
        int i;

        DP_HIST_INIT();

        msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
        num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);

        /* Skip the indication header to reach the first MSDU info record */
        msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;

        for (i = 0; i < num_msdus; i++) {
                sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(
                                                        *(msg_word + 1));
                tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
                if (!tx_desc) {
                        dp_err("failed to find tx desc");
                        qdf_assert_always(0);
                }

                /*
                 * If the descriptor is already freed in vdev_detach,
                 * continue to next descriptor
                 */
                if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
                                 !tx_desc->flags)) {
                        dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
                                           tx_desc->id);
                        DP_STATS_INC(soc, tx.tx_comp_exception, 1);
                        dp_tx_desc_check_corruption(tx_desc);
                        goto next_msdu;
                }

                if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
                        dp_tx_comp_info_rl("pdev in down state %d",
                                           tx_desc->id);
                        tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
                        dp_tx_comp_free_buf(soc, tx_desc, false);
                        dp_tx_desc_release(tx_desc, tx_desc->pool_id);
                        goto next_msdu;
                }

                if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
                    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
                        dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
                                         tx_desc->flags, tx_desc->id);
                        qdf_assert_always(0);
                }

                if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
                    HTT_TX_MSDU_RELEASE_SOURCE_FW)
                        tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
                else
                        tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

                tx_desc->peer_id =
                        HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
                tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));
                tx_desc->tx_status =
                        (tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
                         HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);

                qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

                DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

                /* First descriptor on the completion list */
                if (!head_desc) {
                        head_desc = tx_desc;
                        tail_desc = tx_desc;
                }

                tail_desc->next = tx_desc;
                tx_desc->next = NULL;
                tail_desc = tx_desc;
next_msdu:
                msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
        }

        /* For now, pass ring_id as 0 (zero) as WCN6450 only
         * supports one TX ring.
         */
        ring_id = 0;
        if (head_desc)
                dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

        DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
        DP_TX_HIST_STATS_PER_PDEV();
}


@@ -155,4 +155,13 @@ QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
 * Return: none
 */
void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id);

/**
 * dp_tx_compl_handler_rh() - TX completion handler for Rhine
 * @soc: Handle to DP SoC structure
 * @htt_msg: TX completion HTT message
 *
 * Return: none
 */
void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg);

#endif