qcacmn: Optimize DP Rx Error Handling (Part-2)

In WBM2SW Rx Error path for BE
specific functionality
1) HAL API's/Function pointers are replaced
with specific function calls.
2) Efficient read/write of WBM Error Info
from HAL Rx desc.
3) Minimize reading data from Nbuf TLV.
4) Peer_id fix for MLO clients with security

Change-Id: I1c9e6e767bbf6565567d998ae8e1357398de5803
CRs-Fixed: 3486304
This commit is contained in:
Kenvish Butani
2023-05-03 12:12:19 +05:30
gecommit door Rahul Choudhary
bovenliggende bfebabf7bd
commit 0df4b5eaea
12 gewijzigde bestanden met toevoegingen van 372 en 242 verwijderingen

Bestand weergeven

@@ -2869,8 +2869,6 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be; arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;
arch_ops->dp_rx_peer_metadata_peer_id_get = arch_ops->dp_rx_peer_metadata_peer_id_get =
dp_rx_peer_metadata_peer_id_get_be; dp_rx_peer_metadata_peer_id_get_be;
arch_ops->dp_rx_peer_mdata_link_id_get =
dp_rx_peer_mdata_link_id_get_be;
arch_ops->soc_cfg_attach = dp_soc_cfg_attach_be; arch_ops->soc_cfg_attach = dp_soc_cfg_attach_be;
arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_be; arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_be;
arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_be; arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_be;

Bestand weergeven

@@ -1790,21 +1790,18 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
union dp_rx_desc_list_elem_t union dp_rx_desc_list_elem_t
*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } }; *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } }; uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
uint8_t buf_type;
uint8_t mac_id; uint8_t mac_id;
struct dp_srng *dp_rxdma_srng; struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool; struct rx_desc_pool *rx_desc_pool;
qdf_nbuf_t nbuf_head = NULL; qdf_nbuf_t nbuf_head = NULL;
qdf_nbuf_t nbuf_tail = NULL; qdf_nbuf_t nbuf_tail = NULL;
qdf_nbuf_t nbuf; qdf_nbuf_t nbuf;
struct hal_wbm_err_desc_info wbm_err_info = { 0 };
uint8_t msdu_continuation = 0; uint8_t msdu_continuation = 0;
bool process_sg_buf = false; bool process_sg_buf = false;
uint32_t wbm_err_src;
QDF_STATUS status; QDF_STATUS status;
struct dp_soc *replenish_soc; struct dp_soc *replenish_soc;
uint8_t chip_id; uint8_t chip_id;
struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 }; union hal_wbm_err_info_u wbm_err = { 0 };
qdf_assert(soc && hal_ring_hdl); qdf_assert(soc && hal_ring_hdl);
hal_soc = soc->hal_soc; hal_soc = soc->hal_soc;
@@ -1823,32 +1820,22 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
while (qdf_likely(quota)) { while (qdf_likely(quota)) {
ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl); ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
if (qdf_unlikely(!ring_desc)) if (qdf_unlikely(!ring_desc))
break; break;
/* XXX */ /* Get SW Desc from HAL desc */
buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc); if (dp_wbm_get_rx_desc_from_hal_desc_be(soc,
ring_desc,
/* &rx_desc)) {
* For WBM ring, expect only MSDU buffers dp_rx_err_err("get rx sw desc from hal_desc failed");
*/
qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
(wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
ring_desc,
&rx_desc)) {
dp_rx_err_err("get rx desc from hal_desc failed");
continue; continue;
} }
qdf_assert_always(rx_desc); qdf_assert_always(rx_desc);
if (!dp_rx_desc_check_magic(rx_desc)) { if (!dp_rx_desc_check_magic(rx_desc)) {
dp_rx_err_err("%pk: Invalid rx_desc %pk", dp_rx_err_err("%pK: Invalid rx_desc %pK",
soc, rx_desc); soc, rx_desc);
continue; continue;
} }
@@ -1867,15 +1854,12 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
continue; continue;
} }
hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
nbuf = rx_desc->nbuf;
status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl, status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
ring_desc, rx_desc); ring_desc, rx_desc);
if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
dp_info_rl("Rx error Nbuf %pk sanity check failure!", dp_info_rl("Rx error Nbuf %pK sanity check failure!",
nbuf); rx_desc->nbuf);
rx_desc->in_err_state = 1; rx_desc->in_err_state = 1;
rx_desc->unmapped = 1; rx_desc->unmapped = 1;
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++; rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
@@ -1887,12 +1871,30 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
continue; continue;
} }
/* Get MPDU DESC info */ nbuf = rx_desc->nbuf;
hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
if (qdf_likely(mpdu_desc_info.mpdu_flags & /*
HAL_MPDU_F_QOS_CONTROL_VALID)) * Read wbm err info , MSDU info , MPDU info , peer meta data,
qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid); * from desc. Save all the info in nbuf CB/TLV.
* We will need this info when we do the actual nbuf processing
*/
wbm_err.info = dp_rx_wbm_err_copy_desc_info_in_nbuf(
soc,
ring_desc,
nbuf,
rx_desc->pool_id);
/*
* For WBM ring, expect only MSDU buffers
*/
qdf_assert_always(wbm_err.info_bit.buffer_or_desc_type ==
HAL_RX_WBM_BUF_TYPE_REL_BUF);
/*
* Errors are handled only if the source is RXDMA or REO
*/
qdf_assert((wbm_err.info_bit.wbm_err_src ==
HAL_RX_WBM_ERR_SRC_RXDMA) ||
(wbm_err.info_bit.wbm_err_src ==
HAL_RX_WBM_ERR_SRC_REO));
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
dp_ipa_rx_buf_smmu_mapping_lock(soc); dp_ipa_rx_buf_smmu_mapping_lock(soc);
@@ -1902,11 +1904,12 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
if (qdf_unlikely( if (qdf_unlikely(
soc->wbm_release_desc_rx_sg_support && soc->wbm_release_desc_rx_sg_support &&
dp_rx_is_sg_formation_required(&wbm_err_info))) { dp_rx_is_sg_formation_required(&wbm_err.info_bit))) {
/* SG is detected from continuation bit */ /* SG is detected from continuation bit */
msdu_continuation = msdu_continuation =
hal_rx_wbm_err_msdu_continuation_get(hal_soc, dp_rx_wbm_err_msdu_continuation_get(soc,
ring_desc); ring_desc,
nbuf);
if (msdu_continuation && if (msdu_continuation &&
!(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) { !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
/* Update length from first buffer in SG */ /* Update length from first buffer in SG */
@@ -1933,14 +1936,6 @@ dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
} }
} }
/*
* save the wbm desc info in nbuf TLV. We will need this
* info when we do the actual nbuf processing
*/
wbm_err_info.pool_id = rx_desc->pool_id;
dp_rx_set_err_info(soc, nbuf, wbm_err_info);
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++; rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) { if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {

Bestand weergeven

@@ -659,13 +659,6 @@ dp_rx_set_msdu_hw_link_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf) = logical_link_id; QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf) = logical_link_id;
} }
static inline uint8_t
dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
struct dp_txrx_peer *txrx_peer)
{
return QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf);
}
static inline uint16_t static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf) dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{ {
@@ -768,42 +761,21 @@ static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
{ {
return HAL_RX_TLV_L3_HEADER_PADDING_GET(rx_tlv_hdr); return HAL_RX_TLV_L3_HEADER_PADDING_GET(rx_tlv_hdr);
} }
#else
static inline uint8_t static inline uint8_t
dp_rx_peer_mdata_link_id_get_be(uint32_t peer_metadata) dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
qdf_nbuf_t nbuf)
{ {
uint8_t link_id = 0; return hal_rx_wbm_err_msdu_continuation_get(soc->hal_soc,
ring_desc);
link_id = (HTT_RX_PEER_META_DATA_V1A_LOGICAL_LINK_ID_GET(peer_metadata)
+ 1);
if (link_id > DP_MAX_MLO_LINKS)
link_id = 0;
return link_id;
} }
#else
static inline void static inline void
dp_rx_set_msdu_hw_link_id(qdf_nbuf_t nbuf, uint32_t peer_mdata) dp_rx_set_msdu_hw_link_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{ {
} }
static inline uint8_t
dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
struct dp_txrx_peer *txrx_peer)
{
uint8_t link_id = 0;
link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1);
if (link_id > DP_MAX_MLO_LINKS) {
link_id = 0;
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.inval_link_id_pkt_cnt,
1, link_id);
}
return link_id;
}
static inline uint16_t static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf) dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{ {
@@ -852,5 +824,51 @@ static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
{ {
return QDF_NBUF_CB_RX_L3_PAD_MSB(nbuf) ? 2 : 0; return QDF_NBUF_CB_RX_L3_PAD_MSB(nbuf) ? 2 : 0;
} }
#endif
static inline uint8_t
dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
qdf_nbuf_t nbuf)
{
return qdf_nbuf_is_rx_chfrag_cont(nbuf);
}
#endif /* CONFIG_NBUF_AP_PLATFORM */
/**
* dp_rx_wbm_err_copy_desc_info_in_nbuf(): API to copy WBM dest ring
* descriptor information in nbuf CB/TLV
*
* @soc: pointer to Soc structure
* @ring_desc: wbm dest ring descriptor
* @nbuf: nbuf to save descriptor information
* @pool_id: pool id part of wbm error info
*
* Return: wbm error information details
*/
static inline uint32_t
dp_rx_wbm_err_copy_desc_info_in_nbuf(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
qdf_nbuf_t nbuf,
uint8_t pool_id)
{
uint32_t mpdu_desc_info = 0;
uint32_t msdu_desc_info = 0;
uint32_t peer_mdata = 0;
union hal_wbm_err_info_u wbm_err = { 0 };
/* get WBM mpdu & msdu desc info */
hal_rx_wbm_err_mpdu_msdu_info_get_be(ring_desc,
&wbm_err.info,
&mpdu_desc_info,
&msdu_desc_info,
&peer_mdata);
wbm_err.info_bit.pool_id = pool_id;
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
mpdu_desc_info,
peer_mdata,
msdu_desc_info);
dp_rx_set_wbm_err_info_in_nbuf(soc, nbuf, wbm_err);
return wbm_err.info;
}
#endif #endif

Bestand weergeven

@@ -2419,12 +2419,6 @@ dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
peer_metadata); peer_metadata);
} }
static inline uint8_t
dp_rx_peer_mdata_link_id_get(struct dp_soc *soc, uint32_t peer_metadata)
{
return soc->arch_ops.dp_rx_peer_mdata_link_id_get(peer_metadata);
}
/** /**
* dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
* @soc: SOC handle * @soc: SOC handle
@@ -2501,20 +2495,6 @@ void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
#endif #endif
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86) #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
void dp_rx_set_err_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
struct hal_wbm_err_desc_info wbm_err_info)
{
QDF_NBUF_CB_RX_ERR_CODES(nbuf) = *((uint32_t *)&wbm_err_info);
}
static inline
struct hal_wbm_err_desc_info dp_rx_get_err_info(struct dp_soc *soc,
qdf_nbuf_t nbuf)
{
return *(struct hal_wbm_err_desc_info *)&QDF_NBUF_CB_RX_ERR_CODES(nbuf);
}
static inline static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id, QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng, struct dp_srng *rxdma_srng,
@@ -2669,29 +2649,6 @@ void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
qdf_nbuf_free_simple(nbuf); qdf_nbuf_free_simple(nbuf);
} }
#else #else
static inline
void dp_rx_set_err_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
struct hal_wbm_err_desc_info wbm_err_info)
{
hal_rx_priv_info_set_in_tlv(soc->hal_soc,
qdf_nbuf_data(nbuf),
(uint8_t *)&wbm_err_info,
sizeof(wbm_err_info));
}
static inline
struct hal_wbm_err_desc_info dp_rx_get_err_info(struct dp_soc *soc,
qdf_nbuf_t nbuf)
{
struct hal_wbm_err_desc_info wbm_err_info = { 0 };
hal_rx_priv_info_get_from_tlv(soc->hal_soc, qdf_nbuf_data(nbuf),
(uint8_t *)&wbm_err_info,
sizeof(struct hal_wbm_err_desc_info));
return wbm_err_info;
}
static inline static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id, QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng, struct dp_srng *rxdma_srng,
@@ -3088,6 +3045,19 @@ dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
qdf_nbuf_t tail, qdf_nbuf_t tail,
bool is_eapol); bool is_eapol);
/**
* dp_rx_set_wbm_err_info_in_nbuf() - function to set wbm err info in nbuf
* @soc: DP soc
* @nbuf: skb list head
* @wbm_err: wbm error info details
*
* Return: None
*/
void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
qdf_nbuf_t nbuf,
union hal_wbm_err_info_u wbm_err);
#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */ #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
/** /**
* dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
@@ -3525,4 +3495,30 @@ dp_rx_get_rx_bm_id(struct dp_soc *soc)
} }
#endif /* WLAN_SOFTUMAC_SUPPORT */ #endif /* WLAN_SOFTUMAC_SUPPORT */
#ifndef CONFIG_NBUF_AP_PLATFORM
static inline uint8_t
dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
struct dp_txrx_peer *txrx_peer)
{
return 0;
}
#else
static inline uint8_t
dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
struct dp_txrx_peer *txrx_peer)
{
uint8_t link_id = 0;
link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1);
if (link_id > DP_MAX_MLO_LINKS) {
link_id = 0;
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.inval_link_id_pkt_cnt,
1, link_id);
}
return link_id;
}
#endif /* CONFIG_NBUF_AP_PLATFORM */
#endif /* _DP_RX_H */ #endif /* _DP_RX_H */

Bestand weergeven

@@ -2357,6 +2357,69 @@ void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
} }
#endif #endif
#ifndef CONFIG_NBUF_AP_PLATFORM
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
uint8_t *rx_tlv_hdr,
qdf_nbuf_t nbuf)
{
uint32_t peer_mdata = 0;
peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
rx_tlv_hdr);
return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
}
static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
union hal_wbm_err_info_u *wbm_err)
{
hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
(uint8_t *)&wbm_err->info,
sizeof(union hal_wbm_err_info_u));
}
void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
qdf_nbuf_t nbuf,
union hal_wbm_err_info_u wbm_err)
{
hal_rx_priv_info_set_in_tlv(soc->hal_soc,
qdf_nbuf_data(nbuf),
(uint8_t *)&wbm_err.info,
sizeof(union hal_wbm_err_info_u));
}
#else
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
uint8_t *rx_tlv_hdr,
qdf_nbuf_t nbuf)
{
uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
}
static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
union hal_wbm_err_info_u *wbm_err)
{
wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
}
void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
qdf_nbuf_t nbuf,
union hal_wbm_err_info_u wbm_err)
{
QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
}
#endif /* CONFIG_NBUF_AP_PLATFORM */
uint32_t uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota) hal_ring_handle_t hal_ring_hdl, uint32_t quota)
@@ -2368,7 +2431,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
bool is_tkip_mic_err; bool is_tkip_mic_err;
qdf_nbuf_t nbuf_head = NULL; qdf_nbuf_t nbuf_head = NULL;
qdf_nbuf_t nbuf, next; qdf_nbuf_t nbuf, next;
struct hal_wbm_err_desc_info wbm_err_info = { 0 }; union hal_wbm_err_info_u wbm_err = { 0 };
uint8_t pool_id; uint8_t pool_id;
uint8_t tid = 0; uint8_t tid = 0;
uint8_t link_id = 0; uint8_t link_id = 0;
@@ -2392,18 +2455,20 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
uint16_t peer_id; uint16_t peer_id;
uint8_t err_code; uint8_t err_code;
uint8_t *tlv_hdr; uint8_t *tlv_hdr;
uint32_t peer_meta_data;
dp_txrx_ref_handle txrx_ref_handle = NULL; dp_txrx_ref_handle txrx_ref_handle = NULL;
rx_tlv_hdr = qdf_nbuf_data(nbuf); rx_tlv_hdr = qdf_nbuf_data(nbuf);
/* /*
* retrieve the wbm desc info from nbuf TLV, so we can * retrieve the wbm desc info from nbuf CB/TLV, so we can
* handle error cases appropriately * handle error cases appropriately
*/ */
wbm_err_info = dp_rx_get_err_info(soc, nbuf); dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc, rx_tlv_hdr,
rx_tlv_hdr); &wbm_err);
peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
peer_id = dp_rx_get_peer_id(soc,
rx_tlv_hdr,
nbuf);
txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
&txrx_ref_handle, &txrx_ref_handle,
DP_MOD_ID_RX_ERR); DP_MOD_ID_RX_ERR);
@@ -2412,11 +2477,11 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
dp_info_rl("peer is null peer_id %u err_src %u, " dp_info_rl("peer is null peer_id %u err_src %u, "
"REO: push_rsn %u err_code %u, " "REO: push_rsn %u err_code %u, "
"RXDMA: push_rsn %u err_code %u", "RXDMA: push_rsn %u err_code %u",
peer_id, wbm_err_info.wbm_err_src, peer_id, wbm_err.info_bit.wbm_err_src,
wbm_err_info.reo_psh_rsn, wbm_err.info_bit.reo_psh_rsn,
wbm_err_info.reo_err_code, wbm_err.info_bit.reo_err_code,
wbm_err_info.rxdma_psh_rsn, wbm_err.info_bit.rxdma_psh_rsn,
wbm_err_info.rxdma_err_code); wbm_err.info_bit.rxdma_err_code);
/* Set queue_mapping in nbuf to 0 */ /* Set queue_mapping in nbuf to 0 */
dp_set_rx_queue(nbuf, 0); dp_set_rx_queue(nbuf, 0);
@@ -2442,43 +2507,37 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
continue; continue;
} }
pool_id = wbm_err_info.pool_id; pool_id = wbm_err.info_bit.pool_id;
dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id); dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
if (dp_pdev && dp_pdev->link_peer_stats && if (dp_pdev && dp_pdev->link_peer_stats &&
txrx_peer && txrx_peer->is_mld_peer) { txrx_peer && txrx_peer->is_mld_peer) {
link_id = dp_rx_peer_mdata_link_id_get( link_id = dp_rx_get_stats_arr_idx_from_link_id(
soc, nbuf,
peer_meta_data); txrx_peer);
if (!link_id) {
DP_PEER_PER_PKT_STATS_INC(
txrx_peer,
rx.inval_link_id_pkt_cnt,
1, link_id);
}
} else { } else {
link_id = 0; link_id = 0;
} }
if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) { if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
if (wbm_err_info.reo_psh_rsn if (wbm_err.info_bit.reo_psh_rsn
== HAL_RX_WBM_REO_PSH_RSN_ERROR) { == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
DP_STATS_INC(soc, DP_STATS_INC(soc,
rx.err.reo_error rx.err.reo_error
[wbm_err_info.reo_err_code], 1); [wbm_err.info_bit.reo_err_code], 1);
/* increment @pdev level */ /* increment @pdev level */
if (dp_pdev) if (dp_pdev)
DP_STATS_INC(dp_pdev, err.reo_error, DP_STATS_INC(dp_pdev, err.reo_error,
1); 1);
switch (wbm_err_info.reo_err_code) { switch (wbm_err.info_bit.reo_err_code) {
/* /*
* Handling for packets which have NULL REO * Handling for packets which have NULL REO
* queue descriptor * queue descriptor
*/ */
case HAL_REO_ERR_QUEUE_DESC_ADDR_0: case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
pool_id = wbm_err_info.pool_id; pool_id = wbm_err.info_bit.pool_id;
soc->arch_ops.dp_rx_null_q_desc_handle( soc->arch_ops.dp_rx_null_q_desc_handle(
soc, nbuf, soc, nbuf,
rx_tlv_hdr, rx_tlv_hdr,
@@ -2496,7 +2555,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1, 1,
link_id); link_id);
pool_id = wbm_err_info.pool_id; pool_id = wbm_err.info_bit.pool_id;
if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc, if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
rx_tlv_hdr)) { rx_tlv_hdr)) {
@@ -2553,10 +2612,11 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
default: default:
dp_info_rl("Got pkt with REO ERROR: %d", dp_info_rl("Got pkt with REO ERROR: %d",
wbm_err_info.reo_err_code); wbm_err.info_bit.
reo_err_code);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
} }
} else if (wbm_err_info.reo_psh_rsn } else if (wbm_err.info_bit.reo_psh_rsn
== HAL_RX_WBM_REO_PSH_RSN_ROUTE) { == HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
dp_rx_err_route_hdl(soc, nbuf, txrx_peer, dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
rx_tlv_hdr, rx_tlv_hdr,
@@ -2565,23 +2625,23 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
} else { } else {
/* should not enter here */ /* should not enter here */
dp_rx_err_alert("invalid reo push reason %u", dp_rx_err_alert("invalid reo push reason %u",
wbm_err_info.reo_psh_rsn); wbm_err.info_bit.reo_psh_rsn);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
qdf_assert_always(0); qdf_assert_always(0);
} }
} else if (wbm_err_info.wbm_err_src == } else if (wbm_err.info_bit.wbm_err_src ==
HAL_RX_WBM_ERR_SRC_RXDMA) { HAL_RX_WBM_ERR_SRC_RXDMA) {
if (wbm_err_info.rxdma_psh_rsn if (wbm_err.info_bit.rxdma_psh_rsn
== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) { == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
DP_STATS_INC(soc, DP_STATS_INC(soc,
rx.err.rxdma_error rx.err.rxdma_error
[wbm_err_info.rxdma_err_code], 1); [wbm_err.info_bit.rxdma_err_code], 1);
/* increment @pdev level */ /* increment @pdev level */
if (dp_pdev) if (dp_pdev)
DP_STATS_INC(dp_pdev, DP_STATS_INC(dp_pdev,
err.rxdma_error, 1); err.rxdma_error, 1);
switch (wbm_err_info.rxdma_err_code) { switch (wbm_err.info_bit.rxdma_err_code) {
case HAL_RXDMA_ERR_UNENCRYPTED: case HAL_RXDMA_ERR_UNENCRYPTED:
case HAL_RXDMA_ERR_WIFI_PARSE: case HAL_RXDMA_ERR_WIFI_PARSE:
@@ -2591,11 +2651,12 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1, 1,
link_id); link_id);
pool_id = wbm_err_info.pool_id; pool_id = wbm_err.info_bit.pool_id;
dp_rx_process_rxdma_err(soc, nbuf, dp_rx_process_rxdma_err(soc, nbuf,
rx_tlv_hdr, rx_tlv_hdr,
txrx_peer, txrx_peer,
wbm_err_info. wbm_err.
info_bit.
rxdma_err_code, rxdma_err_code,
pool_id, pool_id,
link_id); link_id);
@@ -2643,8 +2704,8 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
break; break;
} }
pool_id = wbm_err_info.pool_id; pool_id = wbm_err.info_bit.pool_id;
err_code = wbm_err_info.rxdma_err_code; err_code = wbm_err.info_bit.rxdma_err_code;
tlv_hdr = rx_tlv_hdr; tlv_hdr = rx_tlv_hdr;
dp_rx_process_rxdma_err(soc, nbuf, dp_rx_process_rxdma_err(soc, nbuf,
tlv_hdr, NULL, tlv_hdr, NULL,
@@ -2661,8 +2722,8 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
break; break;
case HAL_RXDMA_UNAUTHORIZED_WDS: case HAL_RXDMA_UNAUTHORIZED_WDS:
pool_id = wbm_err_info.pool_id; pool_id = wbm_err.info_bit.pool_id;
err_code = wbm_err_info.rxdma_err_code; err_code = wbm_err.info_bit.rxdma_err_code;
tlv_hdr = rx_tlv_hdr; tlv_hdr = rx_tlv_hdr;
dp_rx_process_rxdma_err(soc, nbuf, dp_rx_process_rxdma_err(soc, nbuf,
tlv_hdr, tlv_hdr,
@@ -2674,24 +2735,24 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
default: default:
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
dp_err_rl("RXDMA error %d", dp_err_rl("RXDMA error %d",
wbm_err_info.rxdma_err_code); wbm_err.info_bit.rxdma_err_code);
} }
} else if (wbm_err_info.rxdma_psh_rsn } else if (wbm_err.info_bit.rxdma_psh_rsn
== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) { == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
dp_rx_err_route_hdl(soc, nbuf, txrx_peer, dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
rx_tlv_hdr, rx_tlv_hdr,
HAL_RX_WBM_ERR_SRC_RXDMA, HAL_RX_WBM_ERR_SRC_RXDMA,
link_id); link_id);
} else if (wbm_err_info.rxdma_psh_rsn } else if (wbm_err.info_bit.rxdma_psh_rsn
== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) { == HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
dp_rx_err_err("rxdma push reason %u", dp_rx_err_err("rxdma push reason %u",
wbm_err_info.rxdma_psh_rsn); wbm_err.info_bit.rxdma_psh_rsn);
DP_STATS_INC(soc, rx.err.rx_flush_count, 1); DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
} else { } else {
/* should not enter here */ /* should not enter here */
dp_rx_err_alert("invalid rxdma push reason %u", dp_rx_err_alert("invalid rxdma push reason %u",
wbm_err_info.rxdma_psh_rsn); wbm_err.info_bit.rxdma_psh_rsn);
dp_rx_nbuf_free(nbuf); dp_rx_nbuf_free(nbuf);
qdf_assert_always(0); qdf_assert_always(0);
} }

Bestand weergeven

@@ -2212,7 +2212,6 @@ enum dp_context_type {
* @dp_service_near_full_srngs: Handler for servicing the near full IRQ * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
* @tx_implicit_rbm_set: * @tx_implicit_rbm_set:
* @dp_rx_peer_metadata_peer_id_get: * @dp_rx_peer_metadata_peer_id_get:
* @dp_rx_peer_mdata_link_id_get: Handle to get link id
* @dp_rx_chain_msdus: * @dp_rx_chain_msdus:
* @txrx_set_vdev_param: target specific ops while setting vdev params * @txrx_set_vdev_param: target specific ops while setting vdev params
* @txrx_get_vdev_mcast_param: target specific ops for getting vdev * @txrx_get_vdev_mcast_param: target specific ops for getting vdev
@@ -2378,7 +2377,6 @@ struct dp_arch_ops {
uint8_t bm_id); uint8_t bm_id);
uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc, uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
uint32_t peer_metadata); uint32_t peer_metadata);
uint8_t (*dp_rx_peer_mdata_link_id_get)(uint32_t peer_metadata);
bool (*dp_rx_chain_msdus)(struct dp_soc *soc, qdf_nbuf_t nbuf, bool (*dp_rx_chain_msdus)(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t mac_id); uint8_t *rx_tlv_hdr, uint8_t mac_id);
/* Control Arch Ops */ /* Control Arch Ops */

Bestand weergeven

@@ -700,8 +700,6 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li; arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
arch_ops->dp_rx_peer_metadata_peer_id_get = arch_ops->dp_rx_peer_metadata_peer_id_get =
dp_rx_peer_metadata_peer_id_get_li; dp_rx_peer_metadata_peer_id_get_li;
arch_ops->dp_rx_peer_mdata_link_id_get =
dp_rx_peer_mdata_link_id_get_li;
arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li; arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li; arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li; arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;

Bestand weergeven

@@ -1091,11 +1091,9 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_desc_t ring_desc; hal_ring_desc_t ring_desc;
hal_soc_handle_t hal_soc; hal_soc_handle_t hal_soc;
struct dp_rx_desc *rx_desc; struct dp_rx_desc *rx_desc;
union dp_rx_desc_list_elem_t union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } }; union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
union dp_rx_desc_list_elem_t uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
uint8_t buf_type; uint8_t buf_type;
uint8_t mac_id; uint8_t mac_id;
struct dp_srng *dp_rxdma_srng; struct dp_srng *dp_rxdma_srng;
@@ -1103,14 +1101,16 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
qdf_nbuf_t nbuf_head = NULL; qdf_nbuf_t nbuf_head = NULL;
qdf_nbuf_t nbuf_tail = NULL; qdf_nbuf_t nbuf_tail = NULL;
qdf_nbuf_t nbuf; qdf_nbuf_t nbuf;
struct hal_wbm_err_desc_info wbm_err_info = { 0 }; union hal_wbm_err_info_u wbm_err_info = { 0 };
uint8_t msdu_continuation = 0; uint8_t msdu_continuation = 0;
bool process_sg_buf = false; bool process_sg_buf = false;
uint32_t wbm_err_src; uint32_t wbm_err_src;
QDF_STATUS status; QDF_STATUS status;
struct dp_soc *replenish_soc; struct dp_soc *replenish_soc;
uint8_t chip_id; uint8_t chip_id = 0;
struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 }; struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
uint8_t *rx_tlv_hdr;
uint32_t peer_mdata;
qdf_assert(soc && hal_ring_hdl); qdf_assert(soc && hal_ring_hdl);
hal_soc = soc->hal_soc; hal_soc = soc->hal_soc;
@@ -1173,7 +1173,8 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
continue; continue;
} }
hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc); hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info.info_bit,
hal_soc);
nbuf = rx_desc->nbuf; nbuf = rx_desc->nbuf;
status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl, status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
@@ -1184,15 +1185,22 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
nbuf); nbuf);
rx_desc->in_err_state = 1; rx_desc->in_err_state = 1;
rx_desc->unmapped = 1; rx_desc->unmapped = 1;
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++; rx_bufs_reaped[rx_desc->pool_id]++;
dp_rx_add_to_free_desc_list( dp_rx_add_to_free_desc_list(
&head[rx_desc->chip_id][rx_desc->pool_id], &head[rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id], &tail[rx_desc->pool_id],
rx_desc); rx_desc);
continue; continue;
} }
/* Update peer_id in nbuf cb */
rx_tlv_hdr = qdf_nbuf_data(nbuf);
peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
rx_tlv_hdr);
QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
/* Get MPDU DESC info */ /* Get MPDU DESC info */
hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info); hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
@@ -1207,8 +1215,8 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
dp_ipa_rx_buf_smmu_mapping_unlock(soc); dp_ipa_rx_buf_smmu_mapping_unlock(soc);
if (qdf_unlikely( if (qdf_unlikely(
soc->wbm_release_desc_rx_sg_support && soc->wbm_release_desc_rx_sg_support &&
dp_rx_is_sg_formation_required(&wbm_err_info))) { dp_rx_is_sg_formation_required(&wbm_err_info.info_bit))) {
/* SG is detected from continuation bit */ /* SG is detected from continuation bit */
msdu_continuation = msdu_continuation =
hal_rx_wbm_err_msdu_continuation_get(hal_soc, hal_rx_wbm_err_msdu_continuation_get(hal_soc,
@@ -1240,13 +1248,13 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
} }
/* /*
* save the wbm desc info in nbuf TLV. We will need this * save the wbm desc info in nbuf CB/TLV. We will need this
* info when we do the actual nbuf processing * info when we do the actual nbuf processing
*/ */
wbm_err_info.pool_id = rx_desc->pool_id; wbm_err_info.info_bit.pool_id = rx_desc->pool_id;
dp_rx_set_err_info(soc, nbuf, wbm_err_info); dp_rx_set_wbm_err_info_in_nbuf(soc, nbuf, wbm_err_info);
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++; rx_bufs_reaped[rx_desc->pool_id]++;
if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) { if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head, DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
@@ -1271,8 +1279,8 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
} }
dp_rx_add_to_free_desc_list dp_rx_add_to_free_desc_list
(&head[rx_desc->chip_id][rx_desc->pool_id], (&head[rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc); &tail[rx_desc->pool_id], rx_desc);
/* /*
* if continuation bit is set then we have MSDU spread * if continuation bit is set then we have MSDU spread
@@ -1285,32 +1293,30 @@ dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
done: done:
dp_srng_access_end(int_ctx, soc, hal_ring_hdl); dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) { for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { /*
/* * continue with next mac_id if no pkts were reaped
* continue with next mac_id if no pkts were reaped * from that pool
* from that pool */
*/ if (!rx_bufs_reaped[mac_id])
if (!rx_bufs_reaped[chip_id][mac_id]) continue;
continue;
replenish_soc = replenish_soc =
soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id); soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id);
dp_rxdma_srng = dp_rxdma_srng =
&replenish_soc->rx_refill_buf_ring[mac_id]; &replenish_soc->rx_refill_buf_ring[mac_id];
rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id]; rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
dp_rx_buffers_replenish_simple( dp_rx_buffers_replenish_simple(
replenish_soc, mac_id, replenish_soc, mac_id,
dp_rxdma_srng, dp_rxdma_srng,
rx_desc_pool, rx_desc_pool,
rx_bufs_reaped[chip_id][mac_id], rx_bufs_reaped[mac_id],
&head[chip_id][mac_id], &head[mac_id],
&tail[chip_id][mac_id]); &tail[mac_id]);
*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id]; *rx_bufs_used += rx_bufs_reaped[mac_id];
}
} }
return nbuf_head; return nbuf_head;
} }

Bestand weergeven

@@ -150,12 +150,6 @@ dp_rx_peer_metadata_peer_id_get_li(struct dp_soc *soc, uint32_t peer_metadata)
return metadata->peer_id; return metadata->peer_id;
} }
static inline uint8_t
dp_rx_peer_mdata_link_id_get_li(uint32_t peer_metadata)
{
return 0;
}
bool bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
qdf_nbuf_t nbuf_copy, qdf_nbuf_t nbuf_copy,

Bestand weergeven

@@ -570,4 +570,34 @@ hal_rx_get_mpdu_msdu_desc_info_be(void *desc_addr,
*msdu_info = *(uint32_t *)(&reo_dst_ring->rx_msdu_desc_info_details); *msdu_info = *(uint32_t *)(&reo_dst_ring->rx_msdu_desc_info_details);
} }
/**
* hal_rx_wbm_err_mpdu_msdu_info_get_be() - Copies wbm, msdu, mpdu info
* from HAL desc
* @desc_addr: WBM2SW Rx Error ring descriptor addr
* @wbm_err_info: Holds WBM Error info from HAL Rx descriptor
* @mpdu_info: Holds MPDU descriptor info from HAL Rx descriptor
* @msdu_info: Holds MSDU descriptor info from HAL Rx descriptor
* @peer_meta_data: Holds Peer Meta data from HAL Rx descriptor
*
* This function copies the WBM error information, MSDU desc info,
* MPDU Desc info and peer meta data from HAL RX Desc.
*
* Return: void
*/
static inline void
hal_rx_wbm_err_mpdu_msdu_info_get_be(void *desc_addr,
				     uint32_t *wbm_err_info,
				     uint32_t *mpdu_info,
				     uint32_t *msdu_info,
				     uint32_t *peer_meta_data)
{
	struct wbm2sw_completion_ring_rx *wbm_rx_err_ring;

	/* Reinterpret the opaque ring entry as the BE WBM2SW Rx
	 * completion descriptor so the individual fields can be read.
	 */
	wbm_rx_err_ring = (struct wbm2sw_completion_ring_rx *)desc_addr;

	/* Each *_desc_info_details field is copied out as a raw 32-bit
	 * word rather than field-by-field, for a single-read fast path.
	 */
	*msdu_info = *(uint32_t *)&wbm_rx_err_ring->rx_msdu_desc_info_details;

	*mpdu_info = *(uint32_t *)&wbm_rx_err_ring->rx_mpdu_desc_info_details;

	/* Peer metadata is read as the 32-bit word immediately following
	 * the first word of rx_mpdu_desc_info_details — assumes peer meta
	 * data occupies the second dword of the MPDU desc info in the BE
	 * descriptor layout; TODO confirm against the HW struct definition.
	 */
	*peer_meta_data =
		*((uint32_t *)&wbm_rx_err_ring->rx_mpdu_desc_info_details + 1);

	/* WBM release/error info is taken from the third 32-bit word of
	 * the descriptor (word offset 2 from its base). NOTE(review):
	 * this offset is tied to the wbm2sw_completion_ring_rx layout and
	 * must be kept in sync with any union/struct used to decode it
	 * (e.g. hal_wbm_err_info_u) — verify on layout changes.
	 */
	*wbm_err_info = *((uint32_t *)wbm_rx_err_ring + 2);
}
#endif /* _HAL_BE_RX_H_ */ #endif /* _HAL_BE_RX_H_ */

Bestand weergeven

@@ -86,26 +86,62 @@ enum {
/** /**
* struct hal_wbm_err_desc_info - structure to hold wbm error codes and reasons * struct hal_wbm_err_desc_info - structure to hold wbm error codes and reasons
* *
* @reo_psh_rsn: REO push reason * The fields of this structure is aligned to HAL Rx WBM2SW Ring desc,
* @reo_err_code: REO Error code * inorder to efficiently copy the data from desc to struct.
* @rxdma_psh_rsn: RXDMA push reason * Do not change the sequence of the fields.
* @rxdma_err_code: RXDMA Error code *
* @reserved_1: Reserved bits * @wbm_err_src: Module which initiated the buffer release
* @wbm_err_src: WBM error source * @bm_action: BM action
* @pool_id: pool ID, indicates which rxdma pool * @buffer_or_desc_type: Type of Buffer or Desc released
* @msdu_continued: Is the MSDU continued * @return_buffer_manager: Buffer address Info for debug
* @reserved_2: Reserved bits * @pool_id: pool ID, indicates which rxdma pool
* @cache_id: cache Id
* @cookie_conversion_status: cookie conversion status
* @rxdma_psh_rsn: RXDMA push reason
* @rxdma_err_code: RXDMA Error code
* @reo_psh_rsn: REO push reason
* @reo_err_code: REO Error code
* @wbm_internal_error: WBM Internal error
*/ */
struct hal_wbm_err_desc_info { struct hal_wbm_err_desc_info {
uint16_t reo_psh_rsn:2, #ifndef WIFI_BIT_ORDER_BIG_ENDIAN
reo_err_code:5, uint32_t wbm_err_src : 3,
rxdma_psh_rsn:2, bm_action : 3,
rxdma_err_code:5, buffer_or_desc_type : 3,
reserved_1:2; return_buffer_manager : 4,
uint8_t wbm_err_src:3, pool_id : 2,
pool_id:2, cache_id : 1,
msdu_continued:1, cookie_conversion_status : 1,
reserved_2:2; rxdma_psh_rsn : 2,
rxdma_err_code : 5,
reo_psh_rsn : 2,
reo_err_code : 5,
wbm_internal_error : 1;
#else
uint32_t wbm_internal_error : 1,
reo_err_code : 5,
reo_psh_rsn : 2,
rxdma_err_code : 5,
rxdma_psh_rsn : 2,
cookie_conversion_status : 1,
cache_id : 1,
pool_id : 2,
return_buffer_manager : 4,
buffer_or_desc_type : 3,
bm_action : 3,
wbm_err_src : 3;
#endif
};
/**
* union hal_wbm_err_info_u - Union to hold wbm error information
* @info_bit: hal_wbm_err_desc_info: structure to hold wbm error info bit fields
* @info: variable to hold wbm error info
*
*/
union hal_wbm_err_info_u {
struct hal_wbm_err_desc_info info_bit;
uint32_t info;
}; };
/** /**

Bestand weergeven

@@ -410,10 +410,6 @@ QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(((struct qdf_nbuf_cb *) \ (((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.msdu_desc_info) ((skb)->cb))->u.rx.hw_info.desc_info.msdu_desc_info)
#define QDF_NBUF_CB_RX_ERR_CODES(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.rx_error_codes)
#define QDF_NBUF_CB_RX_MPDU_DESC_INFO(skb) \ #define QDF_NBUF_CB_RX_MPDU_DESC_INFO(skb) \
(((struct qdf_nbuf_cb *) \ (((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info) ((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info)
@@ -426,6 +422,10 @@ QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(((struct qdf_nbuf_cb *) \ (((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info[1]) ((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info[1])
#define QDF_NBUF_CB_RX_ERROR_CODE_INFO(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.rx_error_codes)
#define QDF_NBUF_CB_RX_PEER_ID(skb) \ #define QDF_NBUF_CB_RX_PEER_ID(skb) \
(((struct qdf_nbuf_cb *) \ (((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.peer_id) ((skb)->cb))->u.rx.hw_info.desc_tlv_members.peer_id)