qcacmn: Optimize DP Rx Error Handling (Part-1)

1) In the WBM2SW Rx Error path, the code to reap
and process the HAL descriptor is split into
BE- and LI-architecture-specific functionality
in the dp_be_rx and dp_li_rx files respectively.
2) The function to handle the Null Queue desc.
error for the WBM and REO Rx Error paths is
split into BE- and LI-architecture-specific
functionality in the dp_be_rx and dp_li_rx
files respectively.

Change-Id: Ic51a9742f65cee677ed7f3081f49fb3ece5b42f1
CRs-Fixed: 3356179
Kenvish Butani
2022-12-08 11:11:41 +05:30
committed by Madan Koyyalamudi
parent c4db9c63b8
commit 2fbc8c8ab0
9 changed files with 1257 additions and 587 deletions
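
In effect, the common handler dp_rx_wbm_err_process() keeps only the
per-nbuf, per-error-code processing and delegates reaping and NULL
queue handling to the architecture layer through two new arch-ops
entries. A minimal sketch of the resulting call flow, assembled from
the hunks below (illustrative only, not the full implementation):

	/* Reap via the arch op registered for this SoC; it resolves to
	 * dp_rx_wbm_err_reap_desc_be() or _li() depending on which
	 * dp_initialize_arch_ops_*() ran at attach time. */
	uint32_t rx_bufs_used = 0;
	qdf_nbuf_t nbuf_head;

	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
							  hal_ring_hdl,
							  quota,
							  &rx_bufs_used);

	/* NULL queue descriptor errors found while walking nbuf_head are
	 * also routed through an arch op; is_reo_exception is FALSE on
	 * this WBM path and TRUE on the REO error path. */
	soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr,
					       pool_id, txrx_peer, FALSE);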


@@ -2604,6 +2604,8 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
		dp_wbm_get_rx_desc_from_hal_desc_be;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_be;
	arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_be;
+	arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_be;
+	arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_be;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_be;
#ifdef WIFI_MONITOR_SUPPORT


@@ -1816,3 +1816,508 @@ bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
	return mpdu_done;
}
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota,
uint32_t *rx_bufs_used)
{
hal_ring_desc_t ring_desc;
hal_soc_handle_t hal_soc;
struct dp_rx_desc *rx_desc;
union dp_rx_desc_list_elem_t
*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
union dp_rx_desc_list_elem_t
*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
uint8_t buf_type;
uint8_t mac_id;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
qdf_nbuf_t nbuf_head = NULL;
qdf_nbuf_t nbuf_tail = NULL;
qdf_nbuf_t nbuf;
struct hal_wbm_err_desc_info wbm_err_info = { 0 };
uint8_t msdu_continuation = 0;
bool process_sg_buf = false;
uint32_t wbm_err_src;
QDF_STATUS status;
struct dp_soc *replenish_soc;
uint8_t chip_id;
struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
qdf_assert(soc && hal_ring_hdl);
hal_soc = soc->hal_soc;
qdf_assert(hal_soc);
if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
/* TODO */
/*
* Need API to convert from hal_ring pointer to
* Ring Type / Ring Id combo
*/
dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
soc, hal_ring_hdl);
goto done;
}
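/*
 * Reap one WBM2SW release ring descriptor per iteration; note that
 * quota is decremented only for the final buffer of an MSDU, so an
 * SG MSDU spread across multiple buffers consumes a single unit.
 */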
while (qdf_likely(quota)) {
ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
if (qdf_unlikely(!ring_desc))
break;
/* XXX */
buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
/*
* For WBM ring, expect only MSDU buffers
*/
qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
(wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
ring_desc,
&rx_desc)) {
dp_rx_err_err("get rx desc from hal_desc failed");
continue;
}
qdf_assert_always(rx_desc);
if (!dp_rx_desc_check_magic(rx_desc)) {
dp_rx_err_err("%pK: Invalid rx_desc %pK",
soc, rx_desc);
continue;
}
/*
* this is an unlikely scenario where the host is reaping
* a descriptor which it already reaped just a while ago
* but is yet to replenish it back to HW.
* In this case host will dump the last 128 descriptors
* including the software descriptor rx_desc and assert.
*/
if (qdf_unlikely(!rx_desc->in_use)) {
DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
ring_desc, rx_desc);
continue;
}
hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
nbuf = rx_desc->nbuf;
status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
ring_desc, rx_desc);
if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
dp_info_rl("Rx error Nbuf %pK sanity check failure!",
nbuf);
rx_desc->in_err_state = 1;
rx_desc->unmapped = 1;
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
dp_rx_add_to_free_desc_list(
&head[rx_desc->chip_id][rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id],
rx_desc);
continue;
}
/* Get MPDU DESC info */
hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
if (qdf_likely(mpdu_desc_info.mpdu_flags &
HAL_MPDU_F_QOS_CONTROL_VALID))
qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
dp_ipa_rx_buf_smmu_mapping_lock(soc);
dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
rx_desc->unmapped = 1;
dp_ipa_rx_buf_smmu_mapping_unlock(soc);
if (qdf_unlikely(
soc->wbm_release_desc_rx_sg_support &&
dp_rx_is_sg_formation_required(&wbm_err_info))) {
/* SG is detected from continuation bit */
msdu_continuation =
hal_rx_wbm_err_msdu_continuation_get(hal_soc,
ring_desc);
if (msdu_continuation &&
!(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
/* Update length from first buffer in SG */
soc->wbm_sg_param.wbm_sg_desc_msdu_len =
hal_rx_msdu_start_msdu_len_get(
soc->hal_soc,
qdf_nbuf_data(nbuf));
soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
true;
}
if (msdu_continuation) {
/* MSDU continued packets */
qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
soc->wbm_sg_param.wbm_sg_desc_msdu_len;
} else {
/* This is the terminal packet in SG */
qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
soc->wbm_sg_param.wbm_sg_desc_msdu_len;
process_sg_buf = true;
}
}
/*
* save the wbm desc info in nbuf TLV. We will need this
* info when we do the actual nbuf processing
*/
wbm_err_info.pool_id = rx_desc->pool_id;
hal_rx_priv_info_set_in_tlv(soc->hal_soc,
qdf_nbuf_data(nbuf),
(uint8_t *)&wbm_err_info,
sizeof(wbm_err_info));
dp_rx_err_tlv_invalidate(soc, nbuf);
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
soc->wbm_sg_param.wbm_sg_nbuf_tail,
nbuf);
if (process_sg_buf) {
if (!dp_rx_buffer_pool_refill(
soc,
soc->wbm_sg_param.wbm_sg_nbuf_head,
rx_desc->pool_id))
DP_RX_MERGE_TWO_LIST(
nbuf_head, nbuf_tail,
soc->wbm_sg_param.wbm_sg_nbuf_head,
soc->wbm_sg_param.wbm_sg_nbuf_tail);
dp_rx_wbm_sg_list_last_msdu_war(soc);
dp_rx_wbm_sg_list_reset(soc);
process_sg_buf = false;
}
} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
rx_desc->pool_id)) {
DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
}
dp_rx_add_to_free_desc_list
(&head[rx_desc->chip_id][rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
/*
* if continuation bit is set then we have MSDU spread
* across multiple buffers, let us not decrement quota
* till we reap all buffers of that MSDU.
*/
if (qdf_likely(!msdu_continuation))
quota -= 1;
}
done:
dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
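/*
 * Replenish every pool that was reaped above; for MLO configurations
 * the reaped chip_id may resolve to a partner SoC through
 * dp_rx_replenish_soc_get().
 */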
for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
/*
* continue with next mac_id if no pkts were reaped
* from that pool
*/
if (!rx_bufs_reaped[chip_id][mac_id])
continue;
replenish_soc =
soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id);
dp_rxdma_srng =
&replenish_soc->rx_refill_buf_ring[mac_id];
rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
dp_rx_buffers_replenish(replenish_soc, mac_id,
dp_rxdma_srng,
rx_desc_pool,
rx_bufs_reaped[chip_id][mac_id],
&head[chip_id][mac_id],
&tail[chip_id][mac_id], false);
*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
}
}
return nbuf_head;
}
QDF_STATUS
dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t pool_id,
struct dp_txrx_peer *txrx_peer,
bool is_reo_exception)
{
uint32_t pkt_len;
uint16_t msdu_len;
struct dp_vdev *vdev;
uint8_t tid;
qdf_ether_header_t *eh;
struct hal_rx_msdu_metadata msdu_metadata;
uint16_t sa_idx = 0;
bool is_eapol = 0;
bool enh_flag;
qdf_nbuf_set_rx_chfrag_start(
nbuf,
hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_rx_chfrag_end(nbuf,
hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_da_valid(nbuf,
hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_sa_valid(nbuf,
hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
rx_tlv_hdr));
hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
if (dp_rx_check_pkt_len(soc, pkt_len))
goto drop_nbuf;
/* Set length in nbuf */
qdf_nbuf_set_pktlen(
nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
qdf_assert_always(nbuf->data == rx_tlv_hdr);
}
/*
* Check if DMA completed -- msdu_done is the last bit
* to be written
*/
if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
dp_err_rl("MSDU DONE failure");
hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
QDF_TRACE_LEVEL_INFO);
qdf_assert(0);
}
if (!txrx_peer &&
dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
rx_tlv_hdr, nbuf))
return QDF_STATUS_E_FAILURE;
if (!txrx_peer) {
bool mpdu_done = false;
struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
if (!pdev) {
dp_err_rl("pdev is null for pool_id = %d", pool_id);
return QDF_STATUS_E_FAILURE;
}
dp_err_rl("txrx_peer is NULL");
DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
qdf_nbuf_len(nbuf));
/* QCN9000 has the support enabled */
if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
mpdu_done = true;
nbuf->next = NULL;
/* Trigger invalid peer handler wrapper */
dp_rx_process_invalid_peer_wrapper(soc,
nbuf,
mpdu_done,
pool_id);
} else {
mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
rx_tlv_hdr,
pool_id);
/* Trigger invalid peer handler wrapper */
dp_rx_process_invalid_peer_wrapper(
soc,
pdev->invalid_peer_head_msdu,
mpdu_done, pool_id);
}
if (mpdu_done) {
pdev->invalid_peer_head_msdu = NULL;
pdev->invalid_peer_tail_msdu = NULL;
}
return QDF_STATUS_E_FAILURE;
}
vdev = txrx_peer->vdev;
if (!vdev) {
dp_err_rl("Null vdev!");
DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
goto drop_nbuf;
}
/*
* Advance the packet start pointer by total size of
* pre-header TLV's
*/
if (qdf_nbuf_is_frag(nbuf))
qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
else
qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
soc->rx_pkt_tlv_size));
DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
goto drop_nbuf;
}
if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
if ((sa_idx < 0) ||
(sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
goto drop_nbuf;
}
}
if ((!soc->mec_fw_offload) &&
dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
/* this is a looped back MCBC pkt, drop it */
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf;
}
/*
* In qwrap mode if the received packet matches with any of the vdev
* mac addresses, drop it. Do not receive multicast packets originated
* from any proxysta.
*/
if (check_qwrap_multicast_loopback(vdev, nbuf)) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf;
}
if (qdf_unlikely(txrx_peer->nawds_enabled &&
hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
rx_tlv_hdr))) {
dp_err_rl("free buffer for multicast packet");
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
goto drop_nbuf;
}
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_err_rl("mcast Policy Check Drop pkt");
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
goto drop_nbuf;
}
/* WDS Source Port Learning */
if (!soc->ast_offload_support &&
qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
vdev->wds_enabled))
dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
msdu_metadata);
if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
struct dp_peer *peer;
struct dp_rx_tid *rx_tid;
tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
DP_MOD_ID_RX_ERR);
if (peer) {
rx_tid = &peer->rx_tid[tid];
qdf_spin_lock_bh(&rx_tid->tid_lock);
if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
dp_rx_tid_setup_wifi3(peer, tid, 1,
IEEE80211_SEQ_MAX);
qdf_spin_unlock_bh(&rx_tid->tid_lock);
/* IEEE80211_SEQ_MAX indicates invalid start_seq */
dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}
}
eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
if (!txrx_peer->authorize) {
is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
if (is_eapol) {
if (!dp_rx_err_match_dhost(eh, vdev))
goto drop_nbuf;
} else {
goto drop_nbuf;
}
}
/*
* Drop packets in this path if cce_match is found. Packets will come
* in following path depending on whether tidQ is setup.
* 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
* cce_match = 1
* Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
* dropped.
* 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
* cce_match = 1
* These packets need to be dropped and should not get delivered
* to stack.
*/
if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
goto drop_nbuf;
if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
qdf_nbuf_set_next(nbuf, NULL);
dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
} else {
enh_flag = vdev->pdev->enhanced_stats_en;
qdf_nbuf_set_next(nbuf, NULL);
DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
/*
* Update the protocol tag in SKB based on
* CCE metadata
*/
dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
EXCEPTION_DEST_RING_ID,
true, true);
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf,
rx_tlv_hdr, true);
if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
soc->hal_soc, rx_tlv_hdr) &&
(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet))) {
DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
DP_PEER_BC_INCC_PKT(txrx_peer, 1,
qdf_nbuf_len(nbuf),
enh_flag);
}
qdf_nbuf_set_exc_frame(nbuf, 1);
dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
is_eapol);
}
return QDF_STATUS_SUCCESS;
drop_nbuf:
dp_rx_nbuf_free(nbuf);
return QDF_STATUS_E_FAILURE;
}


@@ -548,4 +548,55 @@ uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
#endif
/**
* dp_rx_wbm_err_reap_desc_be() - Function to reap and replenish
* WBM RX Error descriptors
*
* @int_ctx: pointer to DP interrupt context
* @soc: core DP main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
* @quota: No. of units (packets) that can be serviced in one shot.
* @rx_bufs_used: No. of descriptors reaped
*
 * This function implements the core Rx functionality of reaping and
 * replenishing the RX error ring descriptors, and creates an nbuf list
 * out of them. It also reads the WBM error information from the
 * descriptors and updates the nbuf TLV area.
*
* Return: qdf_nbuf_t: head pointer to the nbuf list created
*/
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota,
uint32_t *rx_bufs_used);
/**
* dp_rx_null_q_desc_handle_be() - Function to handle NULL Queue
* descriptor violation on either a
* REO or WBM ring
*
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @pool_id: mac id
* @txrx_peer: txrx peer handle
* @is_reo_exception: flag to check if the error is from REO or WBM
*
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, which is when the TID queue is set up. Or
* it may also happen for MC/BC frames if they are not routed to the
* non-QOS TID queue, in the absence of any other default TID queue.
* This error can show up both in a REO destination or WBM release ring.
*
* Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
* if nbuf could not be handled or dropped.
*/
QDF_STATUS
dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t pool_id,
struct dp_txrx_peer *txrx_peer,
bool is_reo_exception);
#endif


@@ -74,6 +74,8 @@
#define dp_rx_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)
+#define dp_rx_err_err(params...) \
+	QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
/**
 * enum dp_rx_desc_state
@@ -3234,4 +3236,104 @@ void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
#endif /* DP_TX_RX_TPUT_SIMULATE */
/**
 * dp_rx_wbm_desc_nbuf_sanity_check() - Sanity check for WBM rx_desc
 * paddr corruption
* @soc: core txrx main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
* @ring_desc: REO ring descriptor
* @rx_desc: Rx descriptor
*
 * Return: QDF_STATUS
*/
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc);
/**
* dp_rx_is_sg_formation_required() - Check if sg formation is required
* @info: WBM desc info
*
* Return: True if sg is required else false
*/
bool dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info);
/**
* dp_rx_err_tlv_invalidate() - Invalidate network buffer
* @soc: core txrx main context
* @nbuf: Network buffer to invalidate
*
* Return: NONE
*/
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
qdf_nbuf_t nbuf);
/**
 * dp_rx_wbm_sg_list_last_msdu_war() - WAR for HW issue
 * @soc: DP SOC handle
 *
 * This is a WAR for a HW issue where the length is valid only in the
 * last MSDU.
 *
 * Return: NONE
 */
void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc);
/**
* dp_rx_check_pkt_len() - Check for pktlen validity
* @soc: DP SOC context
* @pkt_len: computed length of the pkt from caller in bytes
*
 * Return: true if pkt_len > RX_DATA_BUFFER_SIZE, else false
*
*/
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len);
/**
* dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
* @soc: pointer to dp_soc struct
* @pool_id: Pool id to find dp_pdev
* @rx_tlv_hdr: TLV header of received packet
* @nbuf: SKB
*
 * For certain types of packets the peer_id may not be correct, and the
 * driver may then fail to find the peer. In that case, try finding the
 * peer by the addr_2 of the received MPDU. If the peer is found that
 * way, the sw_peer_id & ast_idx are most likely corrupted.
 *
 * Return: True if the peer is found by the addr_2 of the received MPDU,
 * else false
*/
bool dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
uint8_t pool_id,
uint8_t *rx_tlv_hdr,
qdf_nbuf_t nbuf);
/**
 * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3addr_mcast is enabled
* If so, drop the multicast frame.
* @vdev: datapath vdev
* @rx_tlv_hdr: TLV header
*
* Return: true if packet is to be dropped,
* false, if packet is not dropped.
*/
bool dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr);
/**
* dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
* @soc: DP soc
* @vdev: DP vdev handle
* @txrx_peer: pointer to the txrx_peer object
* @nbuf: skb list head
* @tail: skb list tail
* @is_eapol: eapol pkt check
*
* Return: None
*/
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
struct dp_vdev *vdev,
struct dp_txrx_peer *txrx_peer,
qdf_nbuf_t nbuf,
qdf_nbuf_t tail,
bool is_eapol);
#endif /* _DP_RX_H */


@@ -40,7 +40,6 @@
#include "dp_rx_buffer_pool.h"
#define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
-#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
@@ -870,21 +869,7 @@ free_nbuf:
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
-/**
- * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
- * @soc: pointer to dp_soc struct
- * @pool_id: Pool id to find dp_pdev
- * @rx_tlv_hdr: TLV header of received packet
- * @nbuf: SKB
- *
- * In certain types of packets if peer_id is not correct then
- * driver may not be able find. Try finding peer by addr_2 of
- * received MPDU. If you find the peer then most likely sw_peer_id &
- * ast_idx is corrupted.
- *
- * Return: True if you find the peer by addr_2 of received MPDU else false
- */
-static bool
+bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
@@ -922,7 +907,7 @@ dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
	return false;
}
#else
-static inline bool
+bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
@@ -932,15 +917,6 @@ dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
}
#endif
-/**
- * dp_rx_check_pkt_len() - Check for pktlen validity
- * @soc: DP SOC context
- * @pkt_len: computed length of the pkt from caller in bytes
- *
- * Return: true if pktlen > RX_BUFFER_SIZE, else return false
- *
- */
-static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
@@ -952,19 +928,8 @@ bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
	}
}
-/*
- * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
- * @soc: DP soc
- * @vdv: DP vdev handle
- * @txrx_peer: pointer to the txrx_peer object
- * @nbuf: skb list head
- * @tail: skb list tail
- * @is_eapol: eapol pkt check
- *
- * Return: None
- */
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
-static inline void
+void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
@@ -978,7 +943,7 @@ dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
-static inline void
+void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
@@ -1017,16 +982,7 @@ int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
#ifndef QCA_HOST_MODE_WIFI_DISABLED
-/**
- * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3ddr_mcast is enabled
- * If so, drop the multicast frame.
- * @vdev: datapath vdev
- * @rx_tlv_hdr: TLV header
- *
- * Return: true if packet is to be dropped,
- * false, if packet is not dropped.
- */
-static bool
+bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
{
	struct dp_soc *soc = vdev->pdev->soc;
@@ -1064,294 +1020,6 @@ dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
		 hal_rx_reo_is_bar_oor_2k_jump(error_code))));
}
/**
* dp_rx_null_q_desc_handle() - Function to handle NULL Queue
* descriptor violation on either a
* REO or WBM ring
*
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @pool_id: mac id
* @txrx_peer: txrx peer handle
*
* This function handles NULL queue descriptor violations arising out
* a missing REO queue for a given peer or a given TID. This typically
* may happen if a packet is received on a QOS enabled TID before the
* ADDBA negotiation for that TID, when the TID queue is setup. Or
* it may also happen for MC/BC frames if they are not routed to the
* non-QOS TID queue, in the absence of any other default TID queue.
* This error can show up both in a REO destination or WBM release ring.
*
* Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
* if nbuf could not be handled or dropped.
*/
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t pool_id,
struct dp_txrx_peer *txrx_peer)
{
uint32_t pkt_len;
uint16_t msdu_len;
struct dp_vdev *vdev;
uint8_t tid;
qdf_ether_header_t *eh;
struct hal_rx_msdu_metadata msdu_metadata;
uint16_t sa_idx = 0;
bool is_eapol = 0;
bool enh_flag;
qdf_nbuf_set_rx_chfrag_start(nbuf,
hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_rx_chfrag_end(nbuf,
hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_da_valid(nbuf,
hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_sa_valid(nbuf,
hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
rx_tlv_hdr));
hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
if (dp_rx_check_pkt_len(soc, pkt_len))
goto drop_nbuf;
/* Set length in nbuf */
qdf_nbuf_set_pktlen(
nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
qdf_assert_always(nbuf->data == rx_tlv_hdr);
}
/*
* Check if DMA completed -- msdu_done is the last bit
* to be written
*/
if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
dp_err_rl("MSDU DONE failure");
hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
QDF_TRACE_LEVEL_INFO);
qdf_assert(0);
}
if (!txrx_peer &&
dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
rx_tlv_hdr, nbuf))
return QDF_STATUS_E_FAILURE;
if (!txrx_peer) {
bool mpdu_done = false;
struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
if (!pdev) {
dp_err_rl("pdev is null for pool_id = %d", pool_id);
return QDF_STATUS_E_FAILURE;
}
dp_err_rl("txrx_peer is NULL");
DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
qdf_nbuf_len(nbuf));
/* QCN9000 has the support enabled */
if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
mpdu_done = true;
nbuf->next = NULL;
/* Trigger invalid peer handler wrapper */
dp_rx_process_invalid_peer_wrapper(soc,
nbuf, mpdu_done, pool_id);
} else {
mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
rx_tlv_hdr,
pool_id);
/* Trigger invalid peer handler wrapper */
dp_rx_process_invalid_peer_wrapper(soc,
pdev->invalid_peer_head_msdu,
mpdu_done, pool_id);
}
if (mpdu_done) {
pdev->invalid_peer_head_msdu = NULL;
pdev->invalid_peer_tail_msdu = NULL;
}
return QDF_STATUS_E_FAILURE;
}
vdev = txrx_peer->vdev;
if (!vdev) {
dp_err_rl("Null vdev!");
DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
goto drop_nbuf;
}
/*
* Advance the packet start pointer by total size of
* pre-header TLV's
*/
if (qdf_nbuf_is_frag(nbuf))
qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
else
qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
soc->rx_pkt_tlv_size));
DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
goto drop_nbuf;
}
if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
if ((sa_idx < 0) ||
(sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
goto drop_nbuf;
}
}
if ((!soc->mec_fw_offload) &&
dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
/* this is a looped back MCBC pkt, drop it */
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf;
}
/*
* In qwrap mode if the received packet matches with any of the vdev
* mac addresses, drop it. Donot receive multicast packets originated
* from any proxysta.
*/
if (check_qwrap_multicast_loopback(vdev, nbuf)) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf;
}
if (qdf_unlikely(txrx_peer->nawds_enabled &&
hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
rx_tlv_hdr))) {
dp_err_rl("free buffer for multicast packet");
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
goto drop_nbuf;
}
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_err_rl("mcast Policy Check Drop pkt");
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
goto drop_nbuf;
}
/* WDS Source Port Learning */
if (!soc->ast_offload_support &&
qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
vdev->wds_enabled))
dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
msdu_metadata);
if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
struct dp_peer *peer;
struct dp_rx_tid *rx_tid;
tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
DP_MOD_ID_RX_ERR);
if (peer) {
rx_tid = &peer->rx_tid[tid];
qdf_spin_lock_bh(&rx_tid->tid_lock);
if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
dp_rx_tid_setup_wifi3(peer, tid, 1,
IEEE80211_SEQ_MAX);
qdf_spin_unlock_bh(&rx_tid->tid_lock);
/* IEEE80211_SEQ_MAX indicates invalid start_seq */
dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}
}
eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
if (!txrx_peer->authorize) {
is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
if (is_eapol) {
if (!dp_rx_err_match_dhost(eh, vdev))
goto drop_nbuf;
} else {
goto drop_nbuf;
}
}
/*
* Drop packets in this path if cce_match is found. Packets will come
* in following path depending on whether tidQ is setup.
* 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
* cce_match = 1
* Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
* dropped.
* 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
* cce_match = 1
* These packets need to be dropped and should not get delivered
* to stack.
*/
if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) {
goto drop_nbuf;
}
if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
qdf_nbuf_set_next(nbuf, NULL);
dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
} else {
enh_flag = vdev->pdev->enhanced_stats_en;
qdf_nbuf_set_next(nbuf, NULL);
DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
/*
* Update the protocol tag in SKB based on
* CCE metadata
*/
dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
EXCEPTION_DEST_RING_ID,
true, true);
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf,
rx_tlv_hdr, true);
if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
soc->hal_soc, rx_tlv_hdr) &&
(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet))) {
DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
DP_PEER_BC_INCC_PKT(txrx_peer, 1,
qdf_nbuf_len(nbuf),
enh_flag);
}
qdf_nbuf_set_exc_frame(nbuf, 1);
dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
is_eapol);
}
return QDF_STATUS_SUCCESS;
drop_nbuf:
dp_rx_nbuf_free(nbuf);
return QDF_STATUS_E_FAILURE;
}
#ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
@@ -1594,8 +1262,11 @@ more_msdu_link_desc:
	if (!txrx_peer)
		dp_info_rl("txrx_peer is null peer_id %u",
			   peer_id);
-	dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr_last,
-				 rx_desc_pool_id, txrx_peer);
+	soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
+					       rx_tlv_hdr_last,
+					       rx_desc_pool_id,
+					       txrx_peer,
+					       TRUE);
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle,
					  DP_MOD_ID_RX_ERR);
@@ -2637,13 +2308,7 @@ static inline bool dp_handle_rxdma_decrypt_err(void)
}
#endif
-/*
- * dp_rx_wbm_sg_list_last_msdu_war() - war for HW issue
- *
- * This is a war for HW issue where length is only valid in last msdu
- *@soc: DP SOC handle
- */
-static inline void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
+void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
{
	if (soc->wbm_sg_last_msdu_war) {
		uint32_t len;
@@ -2660,17 +2325,6 @@ static inline void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
}
#ifdef RX_DESC_DEBUG_CHECK
-/**
- * dp_rx_wbm_desc_nbuf_sanity_check - Add sanity check to for WBM rx_desc paddr
- * corruption
- * @soc: core txrx main context
- * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
- * @ring_desc: REO ring descriptor
- * @rx_desc: Rx descriptor
- *
- * Return: NONE
- */
-static
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
@@ -2689,7 +2343,6 @@ QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
}
#else
-static
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
@@ -2698,8 +2351,7 @@ QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
	return QDF_STATUS_SUCCESS;
}
#endif
-static inline bool
+bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
@@ -2716,7 +2368,7 @@ dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
}
#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
-static inline void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
+void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
/* /*
@@ -2730,7 +2382,7 @@ static inline void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			 L3_HEADER_PAD));
}
#else
-static inline void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
+void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
}
@@ -2740,35 +2392,16 @@ uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
-	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
-	struct dp_rx_desc *rx_desc;
-	union dp_rx_desc_list_elem_t
-		*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
-	union dp_rx_desc_list_elem_t
-		*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
	uint32_t rx_bufs_used = 0;
-	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
-	uint8_t buf_type;
-	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
-	struct dp_srng *dp_rxdma_srng;
-	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
-	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
-	uint8_t msdu_continuation = 0;
-	bool process_sg_buf = false;
-	uint32_t wbm_err_src;
-	QDF_STATUS status;
-	struct dp_soc *replenish_soc;
-	uint8_t chip_id;
-	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);
@@ -2778,205 +2411,10 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
	/* Debug -- Remove later */
	qdf_assert(hal_soc);
-	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
-		/* TODO */
-		/*
-		 * Need API to convert from hal_ring pointer to
-		 * Ring Type / Ring Id combo
-		 */
-		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
-			      soc, hal_ring_hdl);
-		goto done;
-	}
+	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
+							  hal_ring_hdl,
+							  quota,
+							  &rx_bufs_used);
while (qdf_likely(quota)) {
ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
if (qdf_unlikely(!ring_desc))
break;
/* XXX */
buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
/*
* For WBM ring, expect only MSDU buffers
*/
qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
(wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
ring_desc,
&rx_desc)) {
dp_rx_err_err("get rx desc from hal_desc failed");
continue;
}
qdf_assert_always(rx_desc);
if (!dp_rx_desc_check_magic(rx_desc)) {
dp_rx_err_err("%pk: Invalid rx_desc %pk",
soc, rx_desc);
continue;
}
/*
* this is a unlikely scenario where the host is reaping
* a descriptor which it already reaped just a while ago
* but is yet to replenish it back to HW.
* In this case host will dump the last 128 descriptors
* including the software descriptor rx_desc and assert.
*/
if (qdf_unlikely(!rx_desc->in_use)) {
DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
ring_desc, rx_desc);
continue;
}
hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
nbuf = rx_desc->nbuf;
status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
ring_desc, rx_desc);
if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
dp_info_rl("Rx error Nbuf %pk sanity check failure!",
nbuf);
rx_desc->in_err_state = 1;
rx_desc->unmapped = 1;
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
dp_rx_add_to_free_desc_list(
&head[rx_desc->chip_id][rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id],
rx_desc);
continue;
}
/* Get MPDU DESC info */
hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
if (qdf_likely(mpdu_desc_info.mpdu_flags &
HAL_MPDU_F_QOS_CONTROL_VALID))
qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
dp_ipa_rx_buf_smmu_mapping_lock(soc);
dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
rx_desc->unmapped = 1;
dp_ipa_rx_buf_smmu_mapping_unlock(soc);
if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
dp_rx_is_sg_formation_required(&wbm_err_info))) {
/* SG is detected from continuation bit */
msdu_continuation =
hal_rx_wbm_err_msdu_continuation_get(hal_soc,
ring_desc);
if (msdu_continuation &&
!(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
/* Update length from first buffer in SG */
soc->wbm_sg_param.wbm_sg_desc_msdu_len =
hal_rx_msdu_start_msdu_len_get(
soc->hal_soc,
qdf_nbuf_data(nbuf));
soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
}
if (msdu_continuation) {
/* MSDU continued packets */
qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
soc->wbm_sg_param.wbm_sg_desc_msdu_len;
} else {
/* This is the terminal packet in SG */
qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
soc->wbm_sg_param.wbm_sg_desc_msdu_len;
process_sg_buf = true;
}
}
/*
* save the wbm desc info in nbuf TLV. We will need this
* info when we do the actual nbuf processing
*/
wbm_err_info.pool_id = rx_desc->pool_id;
hal_rx_priv_info_set_in_tlv(soc->hal_soc,
qdf_nbuf_data(nbuf),
(uint8_t *)&wbm_err_info,
sizeof(wbm_err_info));
dp_rx_err_tlv_invalidate(soc, nbuf);
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
soc->wbm_sg_param.wbm_sg_nbuf_tail,
nbuf);
if (process_sg_buf) {
if (!dp_rx_buffer_pool_refill(
soc,
soc->wbm_sg_param.wbm_sg_nbuf_head,
rx_desc->pool_id))
DP_RX_MERGE_TWO_LIST(
nbuf_head, nbuf_tail,
soc->wbm_sg_param.wbm_sg_nbuf_head,
soc->wbm_sg_param.wbm_sg_nbuf_tail);
dp_rx_wbm_sg_list_last_msdu_war(soc);
dp_rx_wbm_sg_list_reset(soc);
process_sg_buf = false;
}
} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
rx_desc->pool_id)) {
DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
}
dp_rx_add_to_free_desc_list
(&head[rx_desc->chip_id][rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
/*
* if continuation bit is set then we have MSDU spread
* across multiple buffers, let us not decrement quota
* till we reap all buffers of that MSDU.
*/
if (qdf_likely(!msdu_continuation))
quota -= 1;
}
done:
dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
/*
* continue with next mac_id if no pkts were reaped
* from that pool
*/
if (!rx_bufs_reaped[chip_id][mac_id])
continue;
replenish_soc =
soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id);
dp_rxdma_srng =
&replenish_soc->rx_refill_buf_ring[mac_id];
rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
dp_rx_buffers_replenish(replenish_soc, mac_id,
dp_rxdma_srng,
rx_desc_pool,
rx_bufs_reaped[chip_id][mac_id],
&head[chip_id][mac_id],
&tail[chip_id][mac_id], false);
rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
}
}
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
@@ -3017,7 +2455,6 @@ done:
		dp_set_rx_queue(nbuf, 0);
		next = nbuf->next;
		/*
		 * Form the SG for msdu continued buffers
		 * QCN9000 has this support
@@ -3059,10 +2496,12 @@ done:
		 */
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			pool_id = wbm_err_info.pool_id;
-			dp_rx_null_q_desc_handle(soc, nbuf,
-						 rx_tlv_hdr,
-						 pool_id,
-						 txrx_peer);
+			soc->arch_ops.dp_rx_null_q_desc_handle(
+							soc, nbuf,
+							rx_tlv_hdr,
+							pool_id,
+							txrx_peer,
+							FALSE);
			break;
		/* TODO */
		/* Add per error code accounting */


@@ -2200,6 +2200,8 @@ enum dp_context_type {
 * @txrx_soc_ppeds_stop:
 * @dp_register_ppeds_interrupts:
 * @dp_free_ppeds_interrupts:
+ * @dp_rx_wbm_err_reap_desc: Reap WBM Error Ring Descriptor
+ * @dp_rx_null_q_desc_handle: Handle Null Queue Exception Error
 */
struct dp_arch_ops {
	/* INIT/DEINIT Arch Ops */
@@ -2411,6 +2413,17 @@ struct dp_arch_ops {
	void (*dp_free_ppeds_interrupts)(struct dp_soc *soc,
					 struct dp_srng *srng, int ring_type,
					 int ring_num);
+	qdf_nbuf_t (*dp_rx_wbm_err_reap_desc)(struct dp_intr *int_ctx,
+					      struct dp_soc *soc,
+					      hal_ring_handle_t hal_ring_hdl,
+					      uint32_t quota,
+					      uint32_t *rx_bufs_used);
+	QDF_STATUS (*dp_rx_null_q_desc_handle)(struct dp_soc *soc,
+					       qdf_nbuf_t nbuf,
+					       uint8_t *rx_tlv_hdr,
+					       uint8_t pool_id,
+					       struct dp_txrx_peer *txrx_peer,
+					       bool is_reo_exception);
};
/**
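
A minimal wiring sketch for the two new members, assuming the BE build
(the LI registration in dp_li.c is symmetric). Note that in this Part-1
change the is_reo_exception argument is plumbed through both call sites
(TRUE on the REO error path, FALSE on the WBM release path) but is not
yet consumed inside the Part-1 handler bodies:

	/* Registration at attach time (from dp_initialize_arch_ops_be()): */
	arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_be;
	arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_be;

	/* Dispatch from the common WBM error path (dp_rx_err.c): */
	soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr,
					       pool_id, txrx_peer,
					       FALSE);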


@@ -613,6 +613,8 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
	arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_li;
+	arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_li;
+	arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_li;
#else
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;


@@ -1076,3 +1076,508 @@ bool dp_rx_chain_msdus_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
	return mpdu_done;
}
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota,
uint32_t *rx_bufs_used)
{
hal_ring_desc_t ring_desc;
hal_soc_handle_t hal_soc;
struct dp_rx_desc *rx_desc;
union dp_rx_desc_list_elem_t
*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
union dp_rx_desc_list_elem_t
*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
uint8_t buf_type;
uint8_t mac_id;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
qdf_nbuf_t nbuf_head = NULL;
qdf_nbuf_t nbuf_tail = NULL;
qdf_nbuf_t nbuf;
struct hal_wbm_err_desc_info wbm_err_info = { 0 };
uint8_t msdu_continuation = 0;
bool process_sg_buf = false;
uint32_t wbm_err_src;
QDF_STATUS status;
struct dp_soc *replenish_soc;
uint8_t chip_id;
struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };
qdf_assert(soc && hal_ring_hdl);
hal_soc = soc->hal_soc;
qdf_assert(hal_soc);
if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
/* TODO */
/*
* Need API to convert from hal_ring pointer to
* Ring Type / Ring Id combo
*/
dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
soc, hal_ring_hdl);
goto done;
}
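/*
 * Reap one WBM2SW release ring descriptor per iteration; note that
 * quota is decremented only for the final buffer of an MSDU, so an
 * SG MSDU spread across multiple buffers consumes a single unit.
 */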
while (qdf_likely(quota)) {
ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
if (qdf_unlikely(!ring_desc))
break;
/* XXX */
buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
/*
* For WBM ring, expect only MSDU buffers
*/
qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
(wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));
if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
ring_desc,
&rx_desc)) {
dp_rx_err_err("get rx desc from hal_desc failed");
continue;
}
qdf_assert_always(rx_desc);
if (!dp_rx_desc_check_magic(rx_desc)) {
dp_rx_err_err("%pK: Invalid rx_desc %pK",
soc, rx_desc);
continue;
}
/*
* this is an unlikely scenario where the host is reaping
* a descriptor which it already reaped just a while ago
* but is yet to replenish it back to HW.
* In this case host will dump the last 128 descriptors
* including the software descriptor rx_desc and assert.
*/
if (qdf_unlikely(!rx_desc->in_use)) {
DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
ring_desc, rx_desc);
continue;
}
hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
nbuf = rx_desc->nbuf;
status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
ring_desc, rx_desc);
if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
dp_info_rl("Rx error Nbuf %pK sanity check failure!",
nbuf);
rx_desc->in_err_state = 1;
rx_desc->unmapped = 1;
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
dp_rx_add_to_free_desc_list(
&head[rx_desc->chip_id][rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id],
rx_desc);
continue;
}
/* Get MPDU DESC info */
hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);
if (qdf_likely(mpdu_desc_info.mpdu_flags &
HAL_MPDU_F_QOS_CONTROL_VALID))
qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
dp_ipa_rx_buf_smmu_mapping_lock(soc);
dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
rx_desc->unmapped = 1;
dp_ipa_rx_buf_smmu_mapping_unlock(soc);
if (qdf_unlikely(
soc->wbm_release_desc_rx_sg_support &&
dp_rx_is_sg_formation_required(&wbm_err_info))) {
/* SG is detected from continuation bit */
msdu_continuation =
hal_rx_wbm_err_msdu_continuation_get(hal_soc,
ring_desc);
if (msdu_continuation &&
!(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
/* Update length from first buffer in SG */
soc->wbm_sg_param.wbm_sg_desc_msdu_len =
hal_rx_msdu_start_msdu_len_get(
soc->hal_soc,
qdf_nbuf_data(nbuf));
soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
true;
}
if (msdu_continuation) {
/* MSDU continued packets */
qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
soc->wbm_sg_param.wbm_sg_desc_msdu_len;
} else {
/* This is the terminal packet in SG */
qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
soc->wbm_sg_param.wbm_sg_desc_msdu_len;
process_sg_buf = true;
}
}
/*
* save the wbm desc info in nbuf TLV. We will need this
* info when we do the actual nbuf processing
*/
wbm_err_info.pool_id = rx_desc->pool_id;
hal_rx_priv_info_set_in_tlv(soc->hal_soc,
qdf_nbuf_data(nbuf),
(uint8_t *)&wbm_err_info,
sizeof(wbm_err_info));
dp_rx_err_tlv_invalidate(soc, nbuf);
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
soc->wbm_sg_param.wbm_sg_nbuf_tail,
nbuf);
if (process_sg_buf) {
if (!dp_rx_buffer_pool_refill(
soc,
soc->wbm_sg_param.wbm_sg_nbuf_head,
rx_desc->pool_id))
DP_RX_MERGE_TWO_LIST(
nbuf_head, nbuf_tail,
soc->wbm_sg_param.wbm_sg_nbuf_head,
soc->wbm_sg_param.wbm_sg_nbuf_tail);
dp_rx_wbm_sg_list_last_msdu_war(soc);
dp_rx_wbm_sg_list_reset(soc);
process_sg_buf = false;
}
} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
rx_desc->pool_id)) {
DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
}
dp_rx_add_to_free_desc_list
(&head[rx_desc->chip_id][rx_desc->pool_id],
&tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
/*
* if continuation bit is set then we have MSDU spread
* across multiple buffers, let us not decrement quota
* till we reap all buffers of that MSDU.
*/
if (qdf_likely(!msdu_continuation))
quota -= 1;
}
done:
dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
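/*
 * Replenish every pool that was reaped above; for MLO configurations
 * the reaped chip_id may resolve to a partner SoC through
 * dp_rx_replenish_soc_get().
 */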
for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
/*
* continue with next mac_id if no pkts were reaped
* from that pool
*/
if (!rx_bufs_reaped[chip_id][mac_id])
continue;
replenish_soc =
soc->arch_ops.dp_rx_replenish_soc_get(soc, chip_id);
dp_rxdma_srng =
&replenish_soc->rx_refill_buf_ring[mac_id];
rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
dp_rx_buffers_replenish(replenish_soc, mac_id,
dp_rxdma_srng,
rx_desc_pool,
rx_bufs_reaped[chip_id][mac_id],
&head[chip_id][mac_id],
&tail[chip_id][mac_id], false);
*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
}
}
return nbuf_head;
}
QDF_STATUS
dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t pool_id,
struct dp_txrx_peer *txrx_peer,
bool is_reo_exception)
{
uint32_t pkt_len;
uint16_t msdu_len;
struct dp_vdev *vdev;
uint8_t tid;
qdf_ether_header_t *eh;
struct hal_rx_msdu_metadata msdu_metadata;
uint16_t sa_idx = 0;
bool is_eapol = 0;
bool enh_flag;
qdf_nbuf_set_rx_chfrag_start(
nbuf,
hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_rx_chfrag_end(nbuf,
hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_da_valid(nbuf,
hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
rx_tlv_hdr));
qdf_nbuf_set_sa_valid(nbuf,
hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
rx_tlv_hdr));
hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
if (dp_rx_check_pkt_len(soc, pkt_len))
goto drop_nbuf;
/* Set length in nbuf */
qdf_nbuf_set_pktlen(
nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
qdf_assert_always(nbuf->data == rx_tlv_hdr);
}
/*
* Check if DMA completed -- msdu_done is the last bit
* to be written
*/
if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
dp_err_rl("MSDU DONE failure");
hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
QDF_TRACE_LEVEL_INFO);
qdf_assert(0);
}
if (!txrx_peer &&
dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
rx_tlv_hdr, nbuf))
return QDF_STATUS_E_FAILURE;
if (!txrx_peer) {
bool mpdu_done = false;
struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
if (!pdev) {
dp_err_rl("pdev is null for pool_id = %d", pool_id);
return QDF_STATUS_E_FAILURE;
}
dp_err_rl("txrx_peer is NULL");
DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
qdf_nbuf_len(nbuf));
/* QCN9000 has the support enabled */
if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
mpdu_done = true;
nbuf->next = NULL;
/* Trigger invalid peer handler wrapper */
dp_rx_process_invalid_peer_wrapper(soc,
nbuf,
mpdu_done,
pool_id);
} else {
mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
rx_tlv_hdr,
pool_id);
/* Trigger invalid peer handler wrapper */
dp_rx_process_invalid_peer_wrapper(
soc,
pdev->invalid_peer_head_msdu,
mpdu_done, pool_id);
}
if (mpdu_done) {
pdev->invalid_peer_head_msdu = NULL;
pdev->invalid_peer_tail_msdu = NULL;
}
return QDF_STATUS_E_FAILURE;
}
vdev = txrx_peer->vdev;
if (!vdev) {
dp_err_rl("Null vdev!");
DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
goto drop_nbuf;
}
/*
* Advance the packet start pointer by total size of
* pre-header TLV's
*/
if (qdf_nbuf_is_frag(nbuf))
qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
else
qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
soc->rx_pkt_tlv_size));
DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
goto drop_nbuf;
}
if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
if ((sa_idx < 0) ||
(sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
goto drop_nbuf;
}
}
if ((!soc->mec_fw_offload) &&
dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
/* this is a looped back MCBC pkt, drop it */
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf;
}
/*
* In qwrap mode if the received packet matches with any of the vdev
* mac addresses, drop it. Do not receive multicast packets originated
* from any proxysta.
*/
if (check_qwrap_multicast_loopback(vdev, nbuf)) {
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
qdf_nbuf_len(nbuf));
goto drop_nbuf;
}
if (qdf_unlikely(txrx_peer->nawds_enabled &&
hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
rx_tlv_hdr))) {
dp_err_rl("free buffer for multicast packet");
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
goto drop_nbuf;
}
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_err_rl("mcast Policy Check Drop pkt");
DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
goto drop_nbuf;
}
/* WDS Source Port Learning */
if (!soc->ast_offload_support &&
qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
vdev->wds_enabled))
dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
msdu_metadata);
if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
struct dp_peer *peer;
struct dp_rx_tid *rx_tid;
tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
DP_MOD_ID_RX_ERR);
if (peer) {
rx_tid = &peer->rx_tid[tid];
qdf_spin_lock_bh(&rx_tid->tid_lock);
if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
dp_rx_tid_setup_wifi3(peer, tid, 1,
IEEE80211_SEQ_MAX);
qdf_spin_unlock_bh(&rx_tid->tid_lock);
/* IEEE80211_SEQ_MAX indicates invalid start_seq */
dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}
}
eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
if (!txrx_peer->authorize) {
is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
if (is_eapol) {
if (!dp_rx_err_match_dhost(eh, vdev))
goto drop_nbuf;
} else {
goto drop_nbuf;
}
}
/*
* Drop packets in this path if cce_match is found. Packets will come
* in following path depending on whether tidQ is setup.
* 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
* cce_match = 1
* Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
* dropped.
* 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
* cce_match = 1
* These packets need to be dropped and should not get delivered
* to stack.
*/
if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
goto drop_nbuf;
if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
qdf_nbuf_set_next(nbuf, NULL);
dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
} else {
enh_flag = vdev->pdev->enhanced_stats_en;
qdf_nbuf_set_next(nbuf, NULL);
DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
/*
* Update the protocol tag in SKB based on
* CCE metadata
*/
dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
EXCEPTION_DEST_RING_ID,
true, true);
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf,
rx_tlv_hdr, true);
if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
soc->hal_soc, rx_tlv_hdr) &&
(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet))) {
DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
enh_flag);
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
DP_PEER_BC_INCC_PKT(txrx_peer, 1,
qdf_nbuf_len(nbuf),
enh_flag);
}
qdf_nbuf_set_exc_frame(nbuf, 1);
dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
is_eapol);
}
return QDF_STATUS_SUCCESS;
drop_nbuf:
dp_rx_nbuf_free(nbuf);
return QDF_STATUS_E_FAILURE;
}


@@ -282,4 +282,55 @@ QDF_STATUS dp_peer_rx_reorder_queue_setup_li(struct dp_soc *soc,
	return QDF_STATUS_SUCCESS;
}
/**
* dp_rx_wbm_err_reap_desc_li() - Function to reap and replenish
* WBM RX Error descriptors
*
* @int_ctx: pointer to DP interrupt context
* @soc: core DP main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
* @quota: No. of units (packets) that can be serviced in one shot.
* @rx_bufs_used: No. of descriptors reaped
*
 * This function implements the core Rx functionality of reaping and
 * replenishing the RX error ring descriptors, and creates an nbuf list
 * out of them. It also reads the WBM error information from the
 * descriptors and updates the nbuf TLV area.
*
* Return: qdf_nbuf_t: head pointer to the nbuf list created
*/
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_li(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota,
uint32_t *rx_bufs_used);
/**
* dp_rx_null_q_desc_handle_li() - Function to handle NULL Queue
* descriptor violation on either a
* REO or WBM ring
*
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @pool_id: mac id
* @txrx_peer: txrx peer handle
* @is_reo_exception: flag to check if the error is from REO or WBM
*
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, which is when the TID queue is set up. Or
* it may also happen for MC/BC frames if they are not routed to the
* non-QOS TID queue, in the absence of any other default TID queue.
* This error can show up both in a REO destination or WBM release ring.
*
* Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
* if nbuf could not be handled or dropped.
*/
QDF_STATUS
dp_rx_null_q_desc_handle_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t pool_id,
struct dp_txrx_peer *txrx_peer,
bool is_reo_exception);
#endif