From 477928661caa64507fbd1f01f4f8cdfb31174321 Mon Sep 17 00:00:00 2001
From: Manjunathappa Prakash
Date: Wed, 17 Feb 2021 01:37:14 -0800
Subject: [PATCH] qcacmn: Core DP RX path changes for WCN7850

Implement core DP rx processing functions in the corresponding
architecture-specific be/li rx files. Keep common utility functions
in DP common files.

Change-Id: I40083e10772fd2b6ce2f1fa9e197f2ad92d0522a
CRs-Fixed: 2891021
---
 dp/wifi3.0/be/dp_be_rx.c            | 756 ++++++++++++++++++++++
 dp/wifi3.0/be/dp_be_rx.h            |  28 +
 dp/wifi3.0/dp_main.c                |   2 +-
 dp/wifi3.0/dp_rx.c                  | 971 +---------------------------
 dp/wifi3.0/dp_rx.h                  | 238 ++++-
 dp/wifi3.0/dp_types.h               |   3 +
 dp/wifi3.0/li/dp_li_rx.c            | 781 ++++++++++++++++++++++
 dp/wifi3.0/li/dp_li_rx.h            |  28 +
 hal/wifi3.0/li/hal_li_generic_api.c |  37 --
 hal/wifi3.0/li/hal_li_rx.h          |  38 ++
 10 files changed, 1886 insertions(+), 996 deletions(-)
 create mode 100644 dp/wifi3.0/be/dp_be_rx.c
 create mode 100644 dp/wifi3.0/be/dp_be_rx.h
 create mode 100644 dp/wifi3.0/li/dp_li_rx.c
 create mode 100644 dp/wifi3.0/li/dp_li_rx.h

diff --git a/dp/wifi3.0/be/dp_be_rx.c b/dp/wifi3.0/be/dp_be_rx.c
new file mode 100644
index 0000000000..9f25e44366
--- /dev/null
+++ b/dp/wifi3.0/be/dp_be_rx.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "cdp_txrx_cmn_struct.h"
+#include "hal_hw_headers.h"
+#include "dp_types.h"
+#include "dp_rx.h"
+#include "dp_be_rx.h"
+#include "dp_peer.h"
+#include "hal_rx.h"
+#include "hal_be_rx.h"
+#include "hal_api.h"
+#include "hal_be_api.h"
+#include "qdf_nbuf.h"
+#ifdef MESH_MODE_SUPPORT
+#include "if_meta_hdr.h"
+#endif
+#include "dp_internal.h"
+#include "dp_rx_mon.h"
+#include "dp_ipa.h"
+#ifdef FEATURE_WDS
+#include "dp_txrx_wds.h"
+#endif
+#include "dp_hist.h"
+#include "dp_rx_buffer_pool.h"
+
+/**
+ * dp_rx_process_be() - Brain of the Rx processing functionality
+ *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
+ * @int_ctx: per interrupt context
+ * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
+ * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
+ * @quota: No. of units (packets) that can be serviced in one shot.
+ *
+ * This function implements the core of Rx functionality. This is
+ * expected to handle only non-error frames.
+ *
+ * Return: uint32_t: No.
of elements processed + */ +uint32_t dp_rx_process_be(struct dp_intr *int_ctx, + hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num, + uint32_t quota) +{ + hal_ring_desc_t ring_desc; + hal_soc_handle_t hal_soc; + struct dp_rx_desc *rx_desc = NULL; + qdf_nbuf_t nbuf, next; + bool near_full; + union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT]; + union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT]; + uint32_t num_pending; + uint32_t rx_bufs_used = 0, rx_buf_cookie; + uint16_t msdu_len = 0; + uint16_t peer_id; + uint8_t vdev_id; + struct dp_peer *peer; + struct dp_vdev *vdev; + uint32_t pkt_len = 0; + struct hal_rx_mpdu_desc_info mpdu_desc_info; + struct hal_rx_msdu_desc_info msdu_desc_info; + enum hal_reo_error_status error; + uint32_t peer_mdata; + uint8_t *rx_tlv_hdr; + uint32_t rx_bufs_reaped[MAX_PDEV_CNT]; + uint8_t mac_id = 0; + struct dp_pdev *rx_pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + struct dp_soc *soc = int_ctx->soc; + uint8_t core_id = 0; + struct cdp_tid_rx_stats *tid_stats; + qdf_nbuf_t nbuf_head; + qdf_nbuf_t nbuf_tail; + qdf_nbuf_t deliver_list_head; + qdf_nbuf_t deliver_list_tail; + uint32_t num_rx_bufs_reaped = 0; + uint32_t intr_id; + struct hif_opaque_softc *scn; + int32_t tid = 0; + bool is_prev_msdu_last = true; + uint32_t num_entries_avail = 0; + uint32_t rx_ol_pkt_cnt = 0; + uint32_t num_entries = 0; + struct hal_rx_msdu_metadata msdu_metadata; + QDF_STATUS status; + qdf_nbuf_t ebuf_head; + qdf_nbuf_t ebuf_tail; + uint8_t pkt_capture_offload = 0; + + DP_HIST_INIT(); + + qdf_assert_always(soc && hal_ring_hdl); + hal_soc = soc->hal_soc; + qdf_assert_always(hal_soc); + + scn = soc->hif_handle; + hif_pm_runtime_mark_dp_rx_busy(scn); + intr_id = int_ctx->dp_intr_id; + num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl); + +more_data: + /* reset local variables here to be re-used in the function */ + nbuf_head = NULL; + nbuf_tail = NULL; + deliver_list_head = NULL; + deliver_list_tail = NULL; + peer = NULL; + vdev = NULL; + num_rx_bufs_reaped = 0; + ebuf_head = NULL; + ebuf_tail = NULL; + + qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped)); + qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info)); + qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info)); + qdf_mem_zero(head, sizeof(head)); + qdf_mem_zero(tail, sizeof(tail)); + + if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) { + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); + goto done; + } + + /* + * start reaping the buffers from reo ring and queue + * them in per vdev queue. + * Process the received pkts in a different per vdev loop. 
+ */ + while (qdf_likely(quota && + (ring_desc = hal_srng_dst_peek(hal_soc, + hal_ring_hdl)))) { + error = HAL_RX_ERROR_STATUS_GET(ring_desc); + + if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) { + dp_rx_err("%pK: HAL RING 0x%pK:error %d", + soc, hal_ring_hdl, error); + DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num], + 1); + /* Don't know how to deal with this -- assert */ + qdf_assert(0); + } + + dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc); + rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); + status = dp_rx_cookie_check_and_invalidate(ring_desc); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + DP_STATS_INC(soc, rx.err.stale_cookie, 1); + break; + } + + rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); + status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl, + ring_desc, rx_desc); + if (QDF_IS_STATUS_ERROR(status)) { + if (qdf_unlikely(rx_desc && rx_desc->nbuf)) { + qdf_assert_always(rx_desc->unmapped); + dp_ipa_reo_ctx_buf_mapping_lock( + soc, + reo_ring_num); + dp_ipa_handle_rx_buf_smmu_mapping( + soc, + rx_desc->nbuf, + RX_DATA_BUFFER_SIZE, + false); + qdf_nbuf_unmap_nbytes_single( + soc->osdev, + rx_desc->nbuf, + QDF_DMA_FROM_DEVICE, + RX_DATA_BUFFER_SIZE); + rx_desc->unmapped = 1; + dp_ipa_reo_ctx_buf_mapping_unlock( + soc, + reo_ring_num); + dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, + rx_desc->pool_id); + dp_rx_add_to_free_desc_list( + &head[rx_desc->pool_id], + &tail[rx_desc->pool_id], + rx_desc); + } + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + /* + * this is a unlikely scenario where the host is reaping + * a descriptor which it already reaped just a while ago + * but is yet to replenish it back to HW. + * In this case host will dump the last 128 descriptors + * including the software descriptor rx_desc and assert. 
+ */ + + if (qdf_unlikely(!rx_desc->in_use)) { + DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); + dp_info_rl("Reaping rx_desc not in use!"); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + /* ignore duplicate RX desc and continue to process */ + /* Pop out the descriptor */ + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); + dp_info_rl("Nbuf sanity check failure!"); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + rx_desc->in_err_state = 1; + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) { + dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie); + DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + } + + /* Get MPDU DESC info */ + hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info); + + /* Get MSDU DESC info */ + hal_rx_msdu_desc_info_get(hal_soc, ring_desc, &msdu_desc_info); + + if (qdf_unlikely(msdu_desc_info.msdu_flags & + HAL_MSDU_F_MSDU_CONTINUATION)) { + /* previous msdu has end bit set, so current one is + * the new MPDU + */ + if (is_prev_msdu_last) { + /* Get number of entries available in HW ring */ + num_entries_avail = + hal_srng_dst_num_valid(hal_soc, + hal_ring_hdl, 1); + + /* For new MPDU check if we can read complete + * MPDU by comparing the number of buffers + * available and number of buffers needed to + * reap this MPDU + */ + if ((msdu_desc_info.msdu_len / + (RX_DATA_BUFFER_SIZE - + soc->rx_pkt_tlv_size) + 1) > + num_entries_avail) { + DP_STATS_INC(soc, + rx.msdu_scatter_wait_break, + 1); + dp_rx_cookie_reset_invalid_bit( + ring_desc); + break; + } + is_prev_msdu_last = false; + } + } + core_id = smp_processor_id(); + DP_STATS_INC(soc, rx.ring_packets[core_id][reo_ring_num], 1); + + if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT) + qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1); + + if (qdf_unlikely(mpdu_desc_info.mpdu_flags & + HAL_MPDU_F_RAW_AMPDU)) + qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1); + + if (!is_prev_msdu_last && + msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) + is_prev_msdu_last = true; + + /* Pop out the descriptor*/ + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + + rx_bufs_reaped[rx_desc->pool_id]++; + peer_mdata = mpdu_desc_info.peer_meta_data; + QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = + DP_PEER_METADATA_PEER_ID_GET(peer_mdata); + QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = + DP_PEER_METADATA_VDEV_ID_GET(peer_mdata); + + /* to indicate whether this msdu is rx offload */ + pkt_capture_offload = + DP_PEER_METADATA_OFFLOAD_GET(peer_mdata); + + /* + * save msdu flags first, last and continuation msdu in + * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and + * length to nbuf->cb. This ensures the info required for + * per pkt processing is always in the same cache line. + * This helps in improving throughput for smaller pkt + * sizes. 
+ */ + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) + qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC) + qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID) + qdf_nbuf_set_da_valid(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID) + qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1); + + qdf_nbuf_set_tid_val(rx_desc->nbuf, + HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc)); +#ifdef CONFIG_LITHIUM + qdf_nbuf_set_rx_reo_dest_ind( + rx_desc->nbuf, + HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc)); +#endif + + QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len; + + QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num; + + /* + * move unmap after scattered msdu waiting break logic + * in case double skb unmap happened. + */ + rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; + dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num); + dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, + rx_desc_pool->buf_size, + false); + qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, + QDF_DMA_FROM_DEVICE, + rx_desc_pool->buf_size); + rx_desc->unmapped = 1; + dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num); + DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head, + ebuf_tail, rx_desc); + /* + * if continuation bit is set then we have MSDU spread + * across multiple buffers, let us not decrement quota + * till we reap all buffers of that MSDU. + */ + if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) + quota -= 1; + + dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], + &tail[rx_desc->pool_id], rx_desc); + num_rx_bufs_reaped++; + /* + * only if complete msdu is received for scatter case, + * then allow break. + */ + if (is_prev_msdu_last && + dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped)) + break; + } +done: + dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl); + + for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { + /* + * continue with next mac_id if no pkts were reaped + * from that pool + */ + if (!rx_bufs_reaped[mac_id]) + continue; + + dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; + + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, rx_bufs_reaped[mac_id], + &head[mac_id], &tail[mac_id]); + } + + dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]); + /* Peer can be NULL is case of LFR */ + if (qdf_likely(peer)) + vdev = NULL; + + /* + * BIG loop where each nbuf is dequeued from global queue, + * processed and queued back on a per vdev basis. These nbufs + * are sent to stack as and when we run out of nbufs + * or a new nbuf dequeued from global queue has a different + * vdev when compared to previous nbuf. 
+ */ + nbuf = nbuf_head; + while (nbuf) { + next = nbuf->next; + if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) { + nbuf = next; + DP_STATS_INC(soc, rx.err.raw_frm_drop, 1); + continue; + } + + rx_tlv_hdr = qdf_nbuf_data(nbuf); + vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); + peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); + + if (dp_rx_is_list_ready(deliver_list_head, vdev, peer, + peer_id, vdev_id)) { + dp_rx_deliver_to_stack(soc, vdev, peer, + deliver_list_head, + deliver_list_tail); + deliver_list_head = NULL; + deliver_list_tail = NULL; + } + + /* Get TID from struct cb->tid_val, save to tid */ + if (qdf_nbuf_is_rx_chfrag_start(nbuf)) + tid = qdf_nbuf_get_tid_val(nbuf); + + if (qdf_unlikely(!peer)) { + peer = dp_peer_get_ref_by_id(soc, peer_id, + DP_MOD_ID_RX); + } else if (peer && peer->peer_id != peer_id) { + dp_peer_unref_delete(peer, DP_MOD_ID_RX); + peer = dp_peer_get_ref_by_id(soc, peer_id, + DP_MOD_ID_RX); + } + + if (peer) { + QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false; + qdf_dp_trace_set_track(nbuf, QDF_RX); + QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; + QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) = + QDF_NBUF_RX_PKT_DATA_TRACK; + } + + rx_bufs_used++; + + if (qdf_likely(peer)) { + vdev = peer->vdev; + } else { + nbuf->next = NULL; + dp_rx_deliver_to_pkt_capture_no_peer( + soc, nbuf, pkt_capture_offload); + if (!pkt_capture_offload) + dp_rx_deliver_to_stack_no_peer(soc, nbuf); + nbuf = next; + continue; + } + + if (qdf_unlikely(!vdev)) { + qdf_nbuf_free(nbuf); + nbuf = next; + DP_STATS_INC(soc, rx.err.invalid_vdev, 1); + continue; + } + + /* when hlos tid override is enabled, save tid in + * skb->priority + */ + if (qdf_unlikely(vdev->skip_sw_tid_classification & + DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) + qdf_nbuf_set_priority(nbuf, tid); + + rx_pdev = vdev->pdev; + DP_RX_TID_SAVE(nbuf, tid); + if (qdf_unlikely(rx_pdev->delay_stats_flag) || + qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled( + soc->wlan_cfg_ctx))) + qdf_nbuf_set_timestamp(nbuf); + + tid_stats = + &rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid]; + + /* + * Check if DMA completed -- msdu_done is the last bit + * to be written + */ + if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) && + !hal_rx_attn_msdu_done_get(hal_soc, + rx_tlv_hdr))) { + dp_err("MSDU DONE failure"); + DP_STATS_INC(soc, rx.err.msdu_done_fail, 1); + hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, + QDF_TRACE_LEVEL_INFO); + tid_stats->fail_cnt[MSDU_DONE_FAILURE]++; + qdf_nbuf_free(nbuf); + qdf_assert(0); + nbuf = next; + continue; + } + + DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); + /* + * First IF condition: + * 802.11 Fragmented pkts are reinjected to REO + * HW block as SG pkts and for these pkts we only + * need to pull the RX TLVS header length. + * Second IF condition: + * The below condition happens when an MSDU is spread + * across multiple buffers. This can happen in two cases + * 1. The nbuf size is smaller then the received msdu. + * ex: we have set the nbuf size to 2048 during + * nbuf_alloc. but we received an msdu which is + * 2304 bytes in size then this msdu is spread + * across 2 nbufs. + * + * 2. AMSDUs when RAW mode is enabled. + * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread + * across 1st nbuf and 2nd nbuf and last MSDU is + * spread across 2nd nbuf and 3rd nbuf. + * + * for these scenarios let us create a skb frag_list and + * append these buffers till the last MSDU of the AMSDU + * Third condition: + * This is the most likely case, we receive 802.3 pkts + * decapsulated by HW, here we need to set the pkt length. 
+ */ + hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata); + if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { + bool is_mcbc, is_sa_vld, is_da_vld; + + is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, + rx_tlv_hdr); + is_sa_vld = + hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, + rx_tlv_hdr); + is_da_vld = + hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, + rx_tlv_hdr); + + qdf_nbuf_set_da_mcbc(nbuf, is_mcbc); + qdf_nbuf_set_da_valid(nbuf, is_da_vld); + qdf_nbuf_set_sa_valid(nbuf, is_sa_vld); + + qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size); + } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { + msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + nbuf = dp_rx_sg_create(soc, nbuf); + next = nbuf->next; + + if (qdf_nbuf_is_raw_frame(nbuf)) { + DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); + DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len); + } else { + qdf_nbuf_free(nbuf); + DP_STATS_INC(soc, rx.err.scatter_msdu, 1); + dp_info_rl("scatter msdu len %d, dropped", + msdu_len); + nbuf = next; + continue; + } + } else { + msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); + pkt_len = msdu_len + + msdu_metadata.l3_hdr_pad + + soc->rx_pkt_tlv_size; + + qdf_nbuf_set_pktlen(nbuf, pkt_len); + dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad); + } + + /* + * process frame for mulitpass phrase processing + */ + if (qdf_unlikely(vdev->multipass_en)) { + if (dp_rx_multipass_process(peer, nbuf, tid) == false) { + DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { + dp_rx_err("%pK: Policy Check Drop pkt", soc); + tid_stats->fail_cnt[POLICY_CHECK_DROP]++; + /* Drop & free packet */ + qdf_nbuf_free(nbuf); + /* Statistics */ + nbuf = next; + continue; + } + + if (qdf_unlikely(peer && (peer->nawds_enabled) && + (qdf_nbuf_is_da_mcbc(nbuf)) && + (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, + rx_tlv_hdr) == + false))) { + tid_stats->fail_cnt[NAWDS_MCAST_DROP]++; + DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + + /* + * Drop non-EAPOL frames from unauthorized peer. 
+ */ + if (qdf_likely(peer) && qdf_unlikely(!peer->authorize)) { + bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || + qdf_nbuf_is_ipv4_wapi_pkt(nbuf); + + if (!is_eapol) { + DP_STATS_INC(soc, + rx.err.peer_unauth_rx_pkt_drop, + 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + if (soc->process_rx_status) + dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr); + + /* Update the protocol tag in SKB based on CCE metadata */ + dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, + reo_ring_num, false, true); + + /* Update the flow tag in SKB based on FSE metadata */ + dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); + + dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, + reo_ring_num, tid_stats); + + if (qdf_unlikely(vdev->mesh_vdev)) { + if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) + == QDF_STATUS_SUCCESS) { + dp_rx_info("%pK: mesh pkt filtered", soc); + tid_stats->fail_cnt[MESH_FILTER_DROP]++; + DP_STATS_INC(vdev->pdev, dropped.mesh_filter, + 1); + + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); + } + + if (qdf_likely(vdev->rx_decap_type == + htt_cmn_pkt_type_ethernet) && + qdf_likely(!vdev->mesh_vdev)) { + /* WDS Destination Address Learning */ + dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf); + + /* WDS Source Port Learning */ + if (qdf_likely(vdev->wds_enabled)) + dp_rx_wds_srcport_learn(soc, + rx_tlv_hdr, + peer, + nbuf, + msdu_metadata); + + /* Intrabss-fwd */ + if (dp_rx_check_ap_bridge(vdev)) + if (DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, + nbuf, msdu_metadata)) { + nbuf = next; + tid_stats->intrabss_cnt++; + continue; /* Get next desc */ + } + } + + dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt); + + dp_rx_update_stats(soc, nbuf); + DP_RX_LIST_APPEND(deliver_list_head, + deliver_list_tail, + nbuf); + DP_STATS_INC_PKT(peer, rx.to_stack, 1, + QDF_NBUF_CB_RX_PKT_LEN(nbuf)); + if (qdf_unlikely(peer->in_twt)) + DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, + QDF_NBUF_CB_RX_PKT_LEN(nbuf)); + + tid_stats->delivered_to_stack++; + nbuf = next; + } + + if (qdf_likely(deliver_list_head)) { + if (qdf_likely(peer)) { + dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id, + pkt_capture_offload, + deliver_list_head); + if (!pkt_capture_offload) + dp_rx_deliver_to_stack(soc, vdev, peer, + deliver_list_head, + deliver_list_tail); + } else { + nbuf = deliver_list_head; + while (nbuf) { + next = nbuf->next; + nbuf->next = NULL; + dp_rx_deliver_to_stack_no_peer(soc, nbuf); + nbuf = next; + } + } + } + + if (qdf_likely(peer)) + dp_peer_unref_delete(peer, DP_MOD_ID_RX); + + if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) { + if (quota) { + num_pending = + dp_rx_srng_get_num_pending(hal_soc, + hal_ring_hdl, + num_entries, + &near_full); + if (num_pending) { + DP_STATS_INC(soc, rx.hp_oos2, 1); + + if (!hif_exec_should_yield(scn, intr_id)) + goto more_data; + + if (qdf_unlikely(near_full)) { + DP_STATS_INC(soc, rx.near_full, 1); + goto more_data; + } + } + } + + if (vdev && vdev->osif_fisa_flush) + vdev->osif_fisa_flush(soc, reo_ring_num); + + if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) { + vdev->osif_gro_flush(vdev->osif_vdev, + reo_ring_num); + } + } + + /* Update histogram statistics by looping through pdev's */ + DP_RX_HIST_STATS_PER_PDEV(); + + return rx_bufs_used; /* Assume no scale factor for now */ +} diff --git a/dp/wifi3.0/be/dp_be_rx.h b/dp/wifi3.0/be/dp_be_rx.h new file mode 100644 index 0000000000..867f9292c7 --- /dev/null +++ b/dp/wifi3.0/be/dp_be_rx.h @@ -0,0 +1,28 @@ +/* + 
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_BE_RX_H_ +#define _DP_BE_RX_H_ + +#include +#include "dp_be.h" + +uint32_t dp_rx_process_be(struct dp_intr *int_ctx, + hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num, + uint32_t quota); +#endif diff --git a/dp/wifi3.0/dp_main.c b/dp/wifi3.0/dp_main.c index 402ffd7cb8..11e0b71f0e 100644 --- a/dp/wifi3.0/dp_main.c +++ b/dp/wifi3.0/dp_main.c @@ -2246,7 +2246,7 @@ static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget) for (ring = 0; ring < soc->num_reo_dest_rings; ring++) { if (!(rx_mask & (1 << ring))) continue; - work_done = dp_rx_process(int_ctx, + work_done = soc->arch_ops.dp_rx_process(int_ctx, soc->reo_dest_ring[ring].hal_srng, ring, remaining_quota); diff --git a/dp/wifi3.0/dp_rx.c b/dp/wifi3.0/dp_rx.c index 883274f40c..2f7e2d7f11 100644 --- a/dp/wifi3.0/dp_rx.c +++ b/dp/wifi3.0/dp_rx.c @@ -19,6 +19,7 @@ #include "hal_hw_headers.h" #include "dp_types.h" #include "dp_rx.h" +#include "dp_tx.h" #include "dp_peer.h" #include "hal_rx.h" #include "hal_api.h" @@ -37,13 +38,6 @@ #ifndef QCA_HOST_MODE_WIFI_DISABLED -#ifdef ATH_RX_PRI_SAVE -#define DP_RX_TID_SAVE(_nbuf, _tid) \ - (qdf_nbuf_set_priority(_nbuf, _tid)) -#else -#define DP_RX_TID_SAVE(_nbuf, _tid) -#endif - #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING static inline bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf) @@ -62,11 +56,6 @@ bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf) return true; } #endif -static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) -{ - return vdev->ap_bridge_enabled; -} - #endif /* QCA_HOST_MODE_WIFI_DISABLED */ #ifdef DUP_RX_DESC_WAR @@ -97,7 +86,6 @@ void dp_rx_dump_info_and_assert(struct dp_soc *soc, #ifndef QCA_HOST_MODE_WIFI_DISABLED #ifdef RX_DESC_SANITY_WAR -static inline QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, hal_ring_handle_t hal_ring_hdl, hal_ring_desc_t ring_desc, @@ -115,8 +103,10 @@ QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, } return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc); - if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM || - return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) { + if (qdf_unlikely(!(return_buffer_manager == + HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) || + return_buffer_manager == + HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) { goto fail; } @@ -130,17 +120,7 @@ fail: return QDF_STATUS_E_NULL_VALUE; } -#else -static inline -QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, - hal_ring_handle_t hal_ring_hdl, - hal_ring_desc_t ring_desc, - struct dp_rx_desc *rx_desc) -{ - return QDF_STATUS_SUCCESS; -} #endif - #endif /* QCA_HOST_MODE_WIFI_DISABLED */ /** 
@@ -534,14 +514,12 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, #ifndef QCA_HOST_MODE_WIFI_DISABLED #ifndef FEATURE_WDS -static void -dp_rx_da_learn(struct dp_soc *soc, - uint8_t *rx_tlv_hdr, - struct dp_peer *ta_peer, - qdf_nbuf_t nbuf) +void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, + struct dp_peer *ta_peer, qdf_nbuf_t nbuf) { } #endif + /* * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic * @@ -552,7 +530,7 @@ dp_rx_da_learn(struct dp_soc *soc, * * Return: bool: true if it is forwarded else false */ -static bool +bool dp_rx_intrabss_fwd(struct dp_soc *soc, struct dp_peer *ta_peer, uint8_t *rx_tlv_hdr, @@ -1166,7 +1144,6 @@ static void dp_rx_print_offload_info(struct dp_soc *soc, * * Return: None */ -static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) { @@ -1412,29 +1389,6 @@ static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev, } #ifdef QCA_SUPPORT_WDS_EXTENDED -/** - * dp_rx_wds_ext() - Make different lists for 4-address and 3-address frames - * @nbuf_head: skb list head - * @vdev: vdev - * @peer: peer - * @peer_id: peer id of new received frame - * @vdev_id: vdev_id of new received frame - * - * Return: true if peer_ids are different. - */ -static inline bool -dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, - struct dp_vdev *vdev, - struct dp_peer *peer, - uint16_t peer_id, - uint8_t vdev_id) -{ - if (nbuf_head && peer && (peer->peer_id != peer_id)) - return true; - - return false; -} - /** * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta * @soc: core txrx main context @@ -1473,19 +1427,6 @@ dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, } #else -static inline bool -dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, - struct dp_vdev *vdev, - struct dp_peer *peer, - uint16_t peer_id, - uint8_t vdev_id) -{ - if (nbuf_head && vdev && (vdev->vdev_id != vdev_id)) - return true; - - return false; -} - static inline bool dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, struct dp_peer *peer, qdf_nbuf_t nbuf_head) @@ -1737,39 +1678,6 @@ void dp_rx_deliver_to_stack(struct dp_soc *soc, } #ifndef QCA_HOST_MODE_WIFI_DISABLED - -/** - * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. - * @nbuf: pointer to the first msdu of an amsdu. - * @rx_tlv_hdr: pointer to the start of RX TLV headers. - * - * The ipsumed field of the skb is set based on whether HW validated the - * IP/TCP/UDP checksum. - * - * Return: void - */ -static inline void dp_rx_cksum_offload(struct dp_pdev *pdev, - qdf_nbuf_t nbuf, - uint8_t *rx_tlv_hdr) -{ - qdf_nbuf_rx_cksum_t cksum = {0}; - //TODO - Move this to ring desc api - //HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET - //HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET - uint32_t ip_csum_err, tcp_udp_csum_er; - - hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err, - &tcp_udp_csum_er); - - if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) { - cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; - qdf_nbuf_set_rx_cksum(nbuf, &cksum); - } else { - DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); - DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er); - } -} - #ifdef VDEV_PEER_PROTOCOL_COUNT #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \ { \ @@ -1806,12 +1714,10 @@ static inline void dp_rx_cksum_offload(struct dp_pdev *pdev, * update all the per msdu stats for that nbuf. 
* Return: void */ -static void dp_rx_msdu_stats_update(struct dp_soc *soc, - qdf_nbuf_t nbuf, - uint8_t *rx_tlv_hdr, - struct dp_peer *peer, - uint8_t ring_id, - struct cdp_tid_rx_stats *tid_stats) +void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer, + uint8_t ring_id, + struct cdp_tid_rx_stats *tid_stats) { bool is_ampdu, is_not_amsdu; uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type; @@ -1923,21 +1829,6 @@ static void dp_rx_msdu_stats_update(struct dp_soc *soc, } } -static inline bool is_sa_da_idx_valid(struct dp_soc *soc, - uint8_t *rx_tlv_hdr, - qdf_nbuf_t nbuf, - struct hal_rx_msdu_metadata msdu_info) -{ - if ((qdf_nbuf_is_sa_valid(nbuf) && - (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) || - (!qdf_nbuf_is_da_mcbc(nbuf) && - qdf_nbuf_is_da_valid(nbuf) && - (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) - return false; - - return true; -} - #ifndef WDS_VENDOR_EXTENSION int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev, @@ -1957,7 +1848,6 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, * * Return: NONE */ -static inline QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc, struct dp_rx_desc *rx_desc) { @@ -1990,19 +1880,11 @@ dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, qdf_assert_always(pkt_len <= rx_desc_pool->buf_size); } #else -static inline -QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc, - struct dp_rx_desc *rx_desc) -{ - return QDF_STATUS_SUCCESS; -} - static inline void dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { } #endif #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT -static inline bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) { bool limit_hit = false; @@ -2017,19 +1899,18 @@ bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) return limit_hit; } -static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc) +bool dp_rx_enable_eol_data_check(struct dp_soc *soc) { return soc->wlan_cfg_ctx->rx_enable_eol_data_check; } #else -static inline bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped) { return false; } -static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc) +bool dp_rx_enable_eol_data_check(struct dp_soc *soc) { return false; } @@ -2049,7 +1930,6 @@ static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc) * * Return: None */ -static void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) { uint16_t peer_id; @@ -2103,7 +1983,6 @@ deliver_fail: dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX); } #else -static inline void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) { DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1, @@ -2129,7 +2008,6 @@ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) * * Return: Number of pending entries if any */ -static uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, hal_ring_handle_t hal_ring_hdl, uint32_t num_entries, @@ -2192,7 +2070,6 @@ void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding) * Return: true - raw frame, dropped * false - not raw frame, do nothing */ -static inline bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) { if (qdf_nbuf_is_raw_frame(nbuf)) { @@ -2202,12 +2079,6 @@ bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) return false; } -#else -static inline -bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) -{ - return false; -} #endif #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY @@ 
-2219,7 +2090,7 @@ bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) * * Returns: None */ -static inline void +void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, hal_ring_desc_t ring_desc) { @@ -2247,12 +2118,6 @@ dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, record->hbi.sw_cookie = hbi.sw_cookie; record->hbi.rbm = hbi.rbm; } -#else -static inline void -dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, - hal_ring_desc_t ring_desc) -{ -} #endif #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR @@ -2263,17 +2128,11 @@ dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, * * Returns: none */ -static inline void dp_rx_update_stats(struct dp_soc *soc, - qdf_nbuf_t nbuf) +void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) { DP_STATS_INC_PKT(soc, rx.ingress, 1, QDF_NBUF_CB_RX_PKT_LEN(nbuf)); } -#else -static inline void dp_rx_update_stats(struct dp_soc *soc, - qdf_nbuf_t nbuf) -{ -} #endif #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 @@ -2350,800 +2209,6 @@ static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc, return false; } #endif - -#ifdef DISABLE_EAPOL_INTRABSS_FWD -/* - * dp_rx_intrabss_fwd_wrapper() - Wrapper API for intrabss fwd. For EAPOL - * pkt with DA not equal to vdev mac addr, fwd is not allowed. - * @soc: core txrx main context - * @ta_peer: source peer entry - * @rx_tlv_hdr: start address of rx tlvs - * @nbuf: nbuf that has to be intrabss forwarded - * @msdu_metadata: msdu metadata - * - * Return: true if it is forwarded else false - */ -static inline -bool dp_rx_intrabss_fwd_wrapper(struct dp_soc *soc, struct dp_peer *ta_peer, - uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, - struct hal_rx_msdu_metadata msdu_metadata) -{ - if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) && - qdf_mem_cmp(qdf_nbuf_data(nbuf) + - QDF_NBUF_DEST_MAC_OFFSET, - ta_peer->vdev->mac_addr.raw, - QDF_MAC_ADDR_SIZE))) { - qdf_nbuf_free(nbuf); - DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1); - return true; - } - - return dp_rx_intrabss_fwd(soc, ta_peer, rx_tlv_hdr, nbuf, - msdu_metadata); -} - -#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \ - dp_rx_intrabss_fwd_wrapper(soc, peer, rx_tlv_hdr, nbuf, \ - msdu_metadata) -#else -#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \ - dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) -#endif - -/** - * dp_rx_process() - Brain of the Rx processing functionality - * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) - * @int_ctx: per interrupt context - * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced - * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring. - * @quota: No. of units (packets) that can be serviced in one shot. - * - * This function implements the core of Rx functionality. This is - * expected to handle only non-error frames. - * - * Return: uint32_t: No. 
of elements processed - */ -uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, - uint8_t reo_ring_num, uint32_t quota) -{ - hal_ring_desc_t ring_desc; - hal_soc_handle_t hal_soc; - struct dp_rx_desc *rx_desc = NULL; - qdf_nbuf_t nbuf, next; - bool near_full; - union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT]; - union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT]; - uint32_t num_pending; - uint32_t rx_bufs_used = 0, rx_buf_cookie; - uint16_t msdu_len = 0; - uint16_t peer_id; - uint8_t vdev_id; - struct dp_peer *peer; - struct dp_vdev *vdev; - uint32_t pkt_len = 0; - struct hal_rx_mpdu_desc_info mpdu_desc_info; - struct hal_rx_msdu_desc_info msdu_desc_info; - enum hal_reo_error_status error; - uint32_t peer_mdata; - uint8_t *rx_tlv_hdr; - uint32_t rx_bufs_reaped[MAX_PDEV_CNT]; - uint8_t mac_id = 0; - struct dp_pdev *rx_pdev; - struct dp_srng *dp_rxdma_srng; - struct rx_desc_pool *rx_desc_pool; - struct dp_soc *soc = int_ctx->soc; - uint8_t ring_id = 0; - uint8_t core_id = 0; - struct cdp_tid_rx_stats *tid_stats; - qdf_nbuf_t nbuf_head; - qdf_nbuf_t nbuf_tail; - qdf_nbuf_t deliver_list_head; - qdf_nbuf_t deliver_list_tail; - uint32_t num_rx_bufs_reaped = 0; - uint32_t intr_id; - struct hif_opaque_softc *scn; - int32_t tid = 0; - bool is_prev_msdu_last = true; - uint32_t num_entries_avail = 0; - uint32_t rx_ol_pkt_cnt = 0; - uint32_t num_entries = 0; - struct hal_rx_msdu_metadata msdu_metadata; - QDF_STATUS status; - qdf_nbuf_t ebuf_head; - qdf_nbuf_t ebuf_tail; - uint8_t pkt_capture_offload = 0; - - DP_HIST_INIT(); - - qdf_assert_always(soc && hal_ring_hdl); - hal_soc = soc->hal_soc; - qdf_assert_always(hal_soc); - - scn = soc->hif_handle; - hif_pm_runtime_mark_dp_rx_busy(scn); - intr_id = int_ctx->dp_intr_id; - num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl); - -more_data: - /* reset local variables here to be re-used in the function */ - nbuf_head = NULL; - nbuf_tail = NULL; - deliver_list_head = NULL; - deliver_list_tail = NULL; - peer = NULL; - vdev = NULL; - num_rx_bufs_reaped = 0; - ebuf_head = NULL; - ebuf_tail = NULL; - - qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped)); - qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info)); - qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info)); - qdf_mem_zero(head, sizeof(head)); - qdf_mem_zero(tail, sizeof(tail)); - - if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) { - - /* - * Need API to convert from hal_ring pointer to - * Ring Type / Ring Id combo - */ - DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); - goto done; - } - - /* - * start reaping the buffers from reo ring and queue - * them in per vdev queue. - * Process the received pkts in a different per vdev loop. 
- */ - while (qdf_likely(quota && - (ring_desc = hal_srng_dst_peek(hal_soc, - hal_ring_hdl)))) { - - error = HAL_RX_ERROR_STATUS_GET(ring_desc); - ring_id = hal_srng_ring_id_get(hal_ring_hdl); - - if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) { - dp_rx_err("%pK: HAL RING 0x%pK:error %d", - soc, hal_ring_hdl, error); - DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1); - /* Don't know how to deal with this -- assert */ - qdf_assert(0); - } - - dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc); - rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); - status = dp_rx_cookie_check_and_invalidate(ring_desc); - if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { - DP_STATS_INC(soc, rx.err.stale_cookie, 1); - qdf_assert_always(0); - } - - rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); - status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl, - ring_desc, rx_desc); - if (QDF_IS_STATUS_ERROR(status)) { - if (qdf_unlikely(rx_desc && rx_desc->nbuf)) { - qdf_assert_always(rx_desc->unmapped); - dp_ipa_reo_ctx_buf_mapping_lock( - soc, - reo_ring_num); - dp_ipa_handle_rx_buf_smmu_mapping( - soc, - rx_desc->nbuf, - RX_DATA_BUFFER_SIZE, - false); - qdf_nbuf_unmap_nbytes_single( - soc->osdev, - rx_desc->nbuf, - QDF_DMA_FROM_DEVICE, - RX_DATA_BUFFER_SIZE); - rx_desc->unmapped = 1; - dp_ipa_reo_ctx_buf_mapping_unlock( - soc, - reo_ring_num); - dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, - rx_desc->pool_id); - dp_rx_add_to_free_desc_list( - &head[rx_desc->pool_id], - &tail[rx_desc->pool_id], - rx_desc); - } - hal_srng_dst_get_next(hal_soc, hal_ring_hdl); - continue; - } - - /* - * this is a unlikely scenario where the host is reaping - * a descriptor which it already reaped just a while ago - * but is yet to replenish it back to HW. - * In this case host will dump the last 128 descriptors - * including the software descriptor rx_desc and assert. 
- */ - - if (qdf_unlikely(!rx_desc->in_use)) { - DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); - dp_info_rl("Reaping rx_desc not in use!"); - dp_rx_dump_info_and_assert(soc, hal_ring_hdl, - ring_desc, rx_desc); - /* ignore duplicate RX desc and continue to process */ - /* Pop out the descriptor */ - hal_srng_dst_get_next(hal_soc, hal_ring_hdl); - continue; - } - - status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc); - if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { - DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); - dp_info_rl("Nbuf sanity check failure!"); - dp_rx_dump_info_and_assert(soc, hal_ring_hdl, - ring_desc, rx_desc); - rx_desc->in_err_state = 1; - hal_srng_dst_get_next(hal_soc, hal_ring_hdl); - continue; - } - - if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) { - dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie); - DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1); - dp_rx_dump_info_and_assert(soc, hal_ring_hdl, - ring_desc, rx_desc); - } - - /* Get MPDU DESC info */ - hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info); - - /* Get MSDU DESC info */ - hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info); - - if (qdf_unlikely(msdu_desc_info.msdu_flags & - HAL_MSDU_F_MSDU_CONTINUATION)) { - /* previous msdu has end bit set, so current one is - * the new MPDU - */ - if (is_prev_msdu_last) { - /* Get number of entries available in HW ring */ - num_entries_avail = - hal_srng_dst_num_valid(hal_soc, - hal_ring_hdl, 1); - - /* For new MPDU check if we can read complete - * MPDU by comparing the number of buffers - * available and number of buffers needed to - * reap this MPDU - */ - if (((msdu_desc_info.msdu_len / - (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) + - 1)) > num_entries_avail) { - DP_STATS_INC( - soc, - rx.msdu_scatter_wait_break, - 1); - dp_rx_cookie_reset_invalid_bit( - ring_desc); - break; - } - is_prev_msdu_last = false; - } - - } - - core_id = smp_processor_id(); - DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1); - - if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT) - qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1); - - if (qdf_unlikely(mpdu_desc_info.mpdu_flags & - HAL_MPDU_F_RAW_AMPDU)) - qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1); - - if (!is_prev_msdu_last && - msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) - is_prev_msdu_last = true; - - /* Pop out the descriptor*/ - hal_srng_dst_get_next(hal_soc, hal_ring_hdl); - - rx_bufs_reaped[rx_desc->pool_id]++; - peer_mdata = mpdu_desc_info.peer_meta_data; - QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = - DP_PEER_METADATA_PEER_ID_GET(peer_mdata); - QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = - DP_PEER_METADATA_VDEV_ID_GET(peer_mdata); - - /* to indicate whether this msdu is rx offload */ - pkt_capture_offload = - DP_PEER_METADATA_OFFLOAD_GET(peer_mdata); - - /* - * save msdu flags first, last and continuation msdu in - * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and - * length to nbuf->cb. This ensures the info required for - * per pkt processing is always in the same cache line. - * This helps in improving throughput for smaller pkt - * sizes. 
- */ - if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) - qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); - - if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) - qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); - - if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) - qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); - - if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC) - qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1); - - if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID) - qdf_nbuf_set_da_valid(rx_desc->nbuf, 1); - - if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID) - qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1); - - qdf_nbuf_set_tid_val(rx_desc->nbuf, - HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc)); - qdf_nbuf_set_rx_reo_dest_ind( - rx_desc->nbuf, - HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc)); - - QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len; - - QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num; - - /* - * move unmap after scattered msdu waiting break logic - * in case double skb unmap happened. - */ - rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; - dp_ipa_reo_ctx_buf_mapping_lock(soc, - reo_ring_num); - dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, - rx_desc_pool->buf_size, - false); - qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, - QDF_DMA_FROM_DEVICE, - rx_desc_pool->buf_size); - rx_desc->unmapped = 1; - dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num); - DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head, - ebuf_tail, rx_desc); - /* - * if continuation bit is set then we have MSDU spread - * across multiple buffers, let us not decrement quota - * till we reap all buffers of that MSDU. - */ - if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) - quota -= 1; - - dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], - &tail[rx_desc->pool_id], - rx_desc); - - num_rx_bufs_reaped++; - /* - * only if complete msdu is received for scatter case, - * then allow break. - */ - if (is_prev_msdu_last && - dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped)) - break; - } -done: - dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl); - - for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { - /* - * continue with next mac_id if no pkts were reaped - * from that pool - */ - if (!rx_bufs_reaped[mac_id]) - continue; - - dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; - - rx_desc_pool = &soc->rx_desc_buf[mac_id]; - - dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, - rx_desc_pool, rx_bufs_reaped[mac_id], - &head[mac_id], &tail[mac_id]); - } - - dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]); - /* Peer can be NULL is case of LFR */ - if (qdf_likely(peer)) - vdev = NULL; - - /* - * BIG loop where each nbuf is dequeued from global queue, - * processed and queued back on a per vdev basis. These nbufs - * are sent to stack as and when we run out of nbufs - * or a new nbuf dequeued from global queue has a different - * vdev when compared to previous nbuf. 
- */ - nbuf = nbuf_head; - while (nbuf) { - next = nbuf->next; - if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) { - nbuf = next; - DP_STATS_INC(soc, rx.err.raw_frm_drop, 1); - continue; - } - - rx_tlv_hdr = qdf_nbuf_data(nbuf); - vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); - peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); - - if (dp_rx_is_list_ready(deliver_list_head, vdev, peer, - peer_id, vdev_id)) { - dp_rx_deliver_to_stack(soc, vdev, peer, - deliver_list_head, - deliver_list_tail); - deliver_list_head = NULL; - deliver_list_tail = NULL; - } - - /* Get TID from struct cb->tid_val, save to tid */ - if (qdf_nbuf_is_rx_chfrag_start(nbuf)) - tid = qdf_nbuf_get_tid_val(nbuf); - - if (qdf_unlikely(!peer)) { - peer = dp_peer_get_ref_by_id(soc, peer_id, - DP_MOD_ID_RX); - } else if (peer && peer->peer_id != peer_id) { - dp_peer_unref_delete(peer, DP_MOD_ID_RX); - peer = dp_peer_get_ref_by_id(soc, peer_id, - DP_MOD_ID_RX); - } - - if (peer) { - QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false; - qdf_dp_trace_set_track(nbuf, QDF_RX); - QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; - QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) = - QDF_NBUF_RX_PKT_DATA_TRACK; - } - - rx_bufs_used++; - - if (qdf_likely(peer)) { - vdev = peer->vdev; - } else { - nbuf->next = NULL; - dp_rx_deliver_to_pkt_capture_no_peer( - soc, nbuf, pkt_capture_offload); - if (!pkt_capture_offload) - dp_rx_deliver_to_stack_no_peer(soc, nbuf); - nbuf = next; - continue; - } - - if (qdf_unlikely(!vdev)) { - qdf_nbuf_free(nbuf); - nbuf = next; - DP_STATS_INC(soc, rx.err.invalid_vdev, 1); - continue; - } - - /* when hlos tid override is enabled, save tid in - * skb->priority - */ - if (qdf_unlikely(vdev->skip_sw_tid_classification & - DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) - qdf_nbuf_set_priority(nbuf, tid); - - rx_pdev = vdev->pdev; - DP_RX_TID_SAVE(nbuf, tid); - if (qdf_unlikely(rx_pdev->delay_stats_flag) || - qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled( - soc->wlan_cfg_ctx))) - qdf_nbuf_set_timestamp(nbuf); - - ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); - tid_stats = - &rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid]; - - /* - * Check if DMA completed -- msdu_done is the last bit - * to be written - */ - if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) && - !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) { - dp_err("MSDU DONE failure"); - DP_STATS_INC(soc, rx.err.msdu_done_fail, 1); - hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, - QDF_TRACE_LEVEL_INFO); - tid_stats->fail_cnt[MSDU_DONE_FAILURE]++; - qdf_nbuf_free(nbuf); - qdf_assert(0); - nbuf = next; - continue; - } - - DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); - /* - * First IF condition: - * 802.11 Fragmented pkts are reinjected to REO - * HW block as SG pkts and for these pkts we only - * need to pull the RX TLVS header length. - * Second IF condition: - * The below condition happens when an MSDU is spread - * across multiple buffers. This can happen in two cases - * 1. The nbuf size is smaller then the received msdu. - * ex: we have set the nbuf size to 2048 during - * nbuf_alloc. but we received an msdu which is - * 2304 bytes in size then this msdu is spread - * across 2 nbufs. - * - * 2. AMSDUs when RAW mode is enabled. - * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread - * across 1st nbuf and 2nd nbuf and last MSDU is - * spread across 2nd nbuf and 3rd nbuf. - * - * for these scenarios let us create a skb frag_list and - * append these buffers till the last MSDU of the AMSDU - * Third condition: - * This is the most likely case, we receive 802.3 pkts - * decapsulated by HW, here we need to set the pkt length. 
- */ - hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata); - if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) { - bool is_mcbc, is_sa_vld, is_da_vld; - - is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, - rx_tlv_hdr); - is_sa_vld = - hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, - rx_tlv_hdr); - is_da_vld = - hal_rx_msdu_end_da_is_valid_get(soc->hal_soc, - rx_tlv_hdr); - - qdf_nbuf_set_da_mcbc(nbuf, is_mcbc); - qdf_nbuf_set_da_valid(nbuf, is_da_vld); - qdf_nbuf_set_sa_valid(nbuf, is_sa_vld); - - qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN); - } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) { - msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); - nbuf = dp_rx_sg_create(soc, nbuf); - next = nbuf->next; - - if (qdf_nbuf_is_raw_frame(nbuf)) { - DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1); - DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len); - } else { - qdf_nbuf_free(nbuf); - DP_STATS_INC(soc, rx.err.scatter_msdu, 1); - dp_info_rl("scatter msdu len %d, dropped", - msdu_len); - nbuf = next; - continue; - } - } else { - - msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf); - pkt_len = msdu_len + - msdu_metadata.l3_hdr_pad + - RX_PKT_TLVS_LEN; - - dp_rx_desc_nbuf_len_sanity_check(soc, pkt_len); - - qdf_nbuf_set_pktlen(nbuf, pkt_len); - dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad); - } - - /* - * process frame for mulitpass phrase processing - */ - if (qdf_unlikely(vdev->multipass_en)) { - if (dp_rx_multipass_process(peer, nbuf, tid) == false) { - DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1); - qdf_nbuf_free(nbuf); - nbuf = next; - continue; - } - } - - if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) { - dp_rx_err("%pK: Policy Check Drop pkt", soc); - tid_stats->fail_cnt[POLICY_CHECK_DROP]++; - /* Drop & free packet */ - qdf_nbuf_free(nbuf); - /* Statistics */ - nbuf = next; - continue; - } - - if (qdf_unlikely(peer && (peer->nawds_enabled) && - (qdf_nbuf_is_da_mcbc(nbuf)) && - (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc, - rx_tlv_hdr) == - false))) { - tid_stats->fail_cnt[NAWDS_MCAST_DROP]++; - DP_STATS_INC(peer, rx.nawds_mcast_drop, 1); - qdf_nbuf_free(nbuf); - nbuf = next; - continue; - } - - /* - * Drop non-EAPOL frames from unauthorized peer. 
- */ - if (qdf_likely(peer) && qdf_unlikely(!peer->authorize)) { - bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || - qdf_nbuf_is_ipv4_wapi_pkt(nbuf); - - if (!is_eapol) { - DP_STATS_INC(soc, - rx.err.peer_unauth_rx_pkt_drop, - 1); - qdf_nbuf_free(nbuf); - nbuf = next; - continue; - } - } - - if (soc->process_rx_status) - dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr); - - /* Update the protocol tag in SKB based on CCE metadata */ - dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, - reo_ring_num, false, true); - - /* Update the flow tag in SKB based on FSE metadata */ - dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); - - dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, - ring_id, tid_stats); - - if (qdf_unlikely(vdev->mesh_vdev)) { - if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) - == QDF_STATUS_SUCCESS) { - dp_rx_info("%pK: mesh pkt filtered", soc); - tid_stats->fail_cnt[MESH_FILTER_DROP]++; - DP_STATS_INC(vdev->pdev, dropped.mesh_filter, - 1); - - qdf_nbuf_free(nbuf); - nbuf = next; - continue; - } - dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); - } - - if (qdf_likely(vdev->rx_decap_type == - htt_cmn_pkt_type_ethernet) && - qdf_likely(!vdev->mesh_vdev)) { - /* WDS Destination Address Learning */ - dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf); - - /* Due to HW issue, sometimes we see that the sa_idx - * and da_idx are invalid with sa_valid and da_valid - * bits set - * - * in this case we also see that value of - * sa_sw_peer_id is set as 0 - * - * Drop the packet if sa_idx and da_idx OOB or - * sa_sw_peerid is 0 - */ - if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf, - msdu_metadata)) { - qdf_nbuf_free(nbuf); - nbuf = next; - DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); - continue; - } - /* WDS Source Port Learning */ - if (qdf_likely(vdev->wds_enabled)) - dp_rx_wds_srcport_learn(soc, - rx_tlv_hdr, - peer, - nbuf, - msdu_metadata); - - /* Intrabss-fwd */ - if (dp_rx_check_ap_bridge(vdev)) - if (DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, - nbuf, msdu_metadata)) { - nbuf = next; - tid_stats->intrabss_cnt++; - continue; /* Get next desc */ - } - - if (qdf_unlikely(dp_rx_mec_check_wrapper(soc, - peer, - rx_tlv_hdr, - nbuf))) { - /* this is a looped back MCBC pkt,drop it */ - DP_STATS_INC_PKT(peer, rx.mec_drop, 1, - QDF_NBUF_CB_RX_PKT_LEN(nbuf)); - qdf_nbuf_free(nbuf); - nbuf = next; - continue; - } - } - - dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt); - - dp_rx_update_stats(soc, nbuf); - DP_RX_LIST_APPEND(deliver_list_head, - deliver_list_tail, - nbuf); - DP_STATS_INC_PKT(peer, rx.to_stack, 1, - QDF_NBUF_CB_RX_PKT_LEN(nbuf)); - if (qdf_unlikely(peer->in_twt)) - DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, - QDF_NBUF_CB_RX_PKT_LEN(nbuf)); - - tid_stats->delivered_to_stack++; - nbuf = next; - } - - if (qdf_likely(deliver_list_head)) { - if (qdf_likely(peer)) { - dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id, - pkt_capture_offload, - deliver_list_head); - if (!pkt_capture_offload) - dp_rx_deliver_to_stack(soc, vdev, peer, - deliver_list_head, - deliver_list_tail); - } - else { - nbuf = deliver_list_head; - while (nbuf) { - next = nbuf->next; - nbuf->next = NULL; - dp_rx_deliver_to_stack_no_peer(soc, nbuf); - nbuf = next; - } - } - } - - if (qdf_likely(peer)) - dp_peer_unref_delete(peer, DP_MOD_ID_RX); - - if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) { - if (quota) { - num_pending = - dp_rx_srng_get_num_pending(hal_soc, - hal_ring_hdl, - num_entries, - &near_full); - if (num_pending) { - DP_STATS_INC(soc, rx.hp_oos2, 1); - - if 
(!hif_exec_should_yield(scn, intr_id)) - goto more_data; - - if (qdf_unlikely(near_full)) { - DP_STATS_INC(soc, rx.near_full, 1); - goto more_data; - } - } - } - - if (vdev && vdev->osif_fisa_flush) - vdev->osif_fisa_flush(soc, reo_ring_num); - - if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) { - vdev->osif_gro_flush(vdev->osif_vdev, - reo_ring_num); - } - } - - /* Update histogram statistics by looping through pdev's */ - DP_RX_HIST_STATS_PER_PDEV(); - - return rx_bufs_used; /* Assume no scale factor for now */ -} - #endif /* QCA_HOST_MODE_WIFI_DISABLED */ QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev) @@ -3503,7 +2568,7 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); - rx_desc_pool->owner = DP_WBM2SW_RBM; + rx_desc_pool->owner = DP_WBM2SW_RBM(soc->wbm_sw0_bm_id); rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; /* Disable monitor dest processing via frag */ diff --git a/dp/wifi3.0/dp_rx.h b/dp/wifi3.0/dp_rx.h index 7c0a810413..7c8181845d 100644 --- a/dp/wifi3.0/dp_rx.h +++ b/dp/wifi3.0/dp_rx.h @@ -20,7 +20,6 @@ #define _DP_RX_H #include "hal_rx.h" -#include "dp_tx.h" #include "dp_peer.h" #include "dp_internal.h" @@ -37,12 +36,12 @@ #endif /* RXDMA_OPTIMIZATION */ #ifdef QCA_HOST2FW_RXBUF_RING -#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM +#define DP_WBM2SW_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id) /* RBM value used for re-injecting defragmented packets into REO */ -#define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM +#define DP_DEFRAG_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id) #else -#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM -#define DP_DEFRAG_RBM DP_WBM2SW_RBM +#define DP_WBM2SW_RBM(sw0_bm_id) HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id) +#define DP_DEFRAG_RBM(sw0_bm_id) DP_WBM2SW_RBM(sw0_bm_id) #endif /* QCA_HOST2FW_RXBUF_RING */ #define RX_BUFFER_RESERVATION 0 @@ -155,6 +154,12 @@ struct dp_rx_desc { }; #ifndef QCA_HOST_MODE_WIFI_DISABLED +#ifdef ATH_RX_PRI_SAVE +#define DP_RX_TID_SAVE(_nbuf, _tid) \ + (qdf_nbuf_set_priority(_nbuf, _tid)) +#else +#define DP_RX_TID_SAVE(_nbuf, _tid) +#endif /* RX Descriptor Multi Page memory alloc related */ #define DP_RX_DESC_OFFSET_NUM_BITS 8 @@ -577,6 +582,11 @@ void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie) #ifndef QCA_HOST_MODE_WIFI_DISABLED +static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev) +{ + return vdev->ap_bridge_enabled; +} + #ifdef DP_RX_DESC_COOKIE_INVALIDATE static inline QDF_STATUS dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc) @@ -1105,6 +1115,62 @@ void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev, } #ifndef QCA_HOST_MODE_WIFI_DISABLED +/* + * dp_rx_intrabss_fwd() - API for intrabss fwd. For EAPOL + * pkt with DA not equal to vdev mac addr, fwd is not allowed. + * @soc: core txrx main context + * @ta_peer: source peer entry + * @rx_tlv_hdr: start address of rx tlvs + * @nbuf: nbuf that has to be intrabss forwarded + * @msdu_metadata: msdu metadata + * + * Return: true if it is forwarded else false + */ + +bool dp_rx_intrabss_fwd(struct dp_soc *soc, + struct dp_peer *ta_peer, + uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf, + struct hal_rx_msdu_metadata msdu_metadata); + +#ifdef DISABLE_EAPOL_INTRABSS_FWD +/* + * dp_rx_intrabss_fwd_wrapper() - Wrapper API for intrabss fwd. For EAPOL + * pkt with DA not equal to vdev mac addr, fwd is not allowed. 
+ * @soc: core txrx main context + * @ta_peer: source peer entry + * @rx_tlv_hdr: start address of rx tlvs + * @nbuf: nbuf that has to be intrabss forwarded + * @msdu_metadata: msdu metadata + * + * Return: true if it is forwarded else false + */ +static inline +bool dp_rx_intrabss_fwd_wrapper(struct dp_soc *soc, struct dp_peer *ta_peer, + uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, + struct hal_rx_msdu_metadata msdu_metadata) +{ + if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) && + qdf_mem_cmp(qdf_nbuf_data(nbuf) + + QDF_NBUF_DEST_MAC_OFFSET, + ta_peer->vdev->mac_addr.raw, + QDF_MAC_ADDR_SIZE))) { + qdf_nbuf_free(nbuf); + DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1); + return true; + } + + return dp_rx_intrabss_fwd(soc, ta_peer, rx_tlv_hdr, nbuf, + msdu_metadata); +} + +#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \ + dp_rx_intrabss_fwd_wrapper(soc, peer, rx_tlv_hdr, nbuf, \ + msdu_metadata) +#else /* DISABLE_EAPOL_INTRABSS_FWD */ +#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \ + dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) +#endif /* DISABLE_EAPOL_INTRABSS_FWD */ /** * dp_rx_defrag_concat() - Concatenate the fragments @@ -1139,6 +1205,9 @@ static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src) #endif /* QCA_HOST_MODE_WIFI_DISABLED */ #ifndef FEATURE_WDS +void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, + struct dp_peer *ta_peer, qdf_nbuf_t nbuf); + static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active) { return QDF_STATUS_SUCCESS; @@ -1742,4 +1811,163 @@ static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, } #endif /* FEATURE_MEC */ #endif /* QCA_HOST_MODE_WIFI_DISABLED */ + +#ifdef RECEIVE_OFFLOAD +void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, + qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt); +#else +void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, + qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) +{ +} +#endif + +void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_peer *peer, + uint8_t ring_id, + struct cdp_tid_rx_stats *tid_stats); + +void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf); + +uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, + hal_ring_handle_t hal_ring_hdl, + uint32_t num_entries, + bool *near_full); + +#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY +void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, + hal_ring_desc_t ring_desc); +#else +static inline void +dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, + hal_ring_desc_t ring_desc) +{ +} +#endif + +#ifndef QCA_HOST_MODE_WIFI_DISABLED +#ifdef RX_DESC_SANITY_WAR +QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, + hal_ring_handle_t hal_ring_hdl, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc); +#else +static inline +QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, + hal_ring_handle_t hal_ring_hdl, + hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef DP_RX_DROP_RAW_FRM +bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf); +#else +static inline +bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) +{ + return false; +} +#endif + +#ifdef RX_DESC_DEBUG_CHECK +QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc, + struct dp_rx_desc *rx_desc); +#else +static inline +QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t 
ring_desc, + struct dp_rx_desc *rx_desc) +{ + return QDF_STATUS_SUCCESS; +} +#endif + +#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR +void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); +#else +static inline +void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) +{ +} +#endif + +/** + * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. + * @nbuf: pointer to the first msdu of an amsdu. + * @rx_tlv_hdr: pointer to the start of RX TLV headers. + * + * The ipsumed field of the skb is set based on whether HW validated the + * IP/TCP/UDP checksum. + * + * Return: void + */ +static inline +void dp_rx_cksum_offload(struct dp_pdev *pdev, + qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr) +{ + qdf_nbuf_rx_cksum_t cksum = {0}; + //TODO - Move this to ring desc api + //HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET + //HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET + uint32_t ip_csum_err, tcp_udp_csum_er; + + hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err, + &tcp_udp_csum_er); + + if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) { + cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY; + qdf_nbuf_set_rx_cksum(nbuf, &cksum); + } else { + DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err); + DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er); + } +} + +#endif /* QCA_HOST_MODE_WIFI_DISABLED */ + +bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped); +bool dp_rx_enable_eol_data_check(struct dp_soc *soc); +void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); + +#ifdef QCA_SUPPORT_WDS_EXTENDED +/** + * dp_rx_is_list_ready() - Make different lists for 4-address + and 3-address frames + * @nbuf_head: skb list head + * @vdev: vdev + * @peer: peer + * @peer_id: peer id of new received frame + * @vdev_id: vdev_id of new received frame + * + * Return: true if peer_ids are different. + */ +static inline bool +dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, + struct dp_vdev *vdev, + struct dp_peer *peer, + uint16_t peer_id, + uint8_t vdev_id) +{ + if (nbuf_head && peer && (peer->peer_id != peer_id)) + return true; + + return false; +} +#else +static inline bool +dp_rx_is_list_ready(qdf_nbuf_t nbuf_head, + struct dp_vdev *vdev, + struct dp_peer *peer, + uint16_t peer_id, + uint8_t vdev_id) +{ + if (nbuf_head && vdev && (vdev->vdev_id != vdev_id)) + return true; + + return false; +} +#endif #endif /* _DP_RX_H */ diff --git a/dp/wifi3.0/dp_types.h b/dp/wifi3.0/dp_types.h index a64ae87d23..fc9dfb825c 100644 --- a/dp/wifi3.0/dp_types.h +++ b/dp/wifi3.0/dp_types.h @@ -1544,6 +1544,9 @@ struct dp_arch_ops { void (*tx_comp_get_params_from_hal_desc)(struct dp_soc *soc, void *tx_comp_hal_desc, struct dp_tx_desc_s **desc); + uint32_t (*dp_rx_process)(struct dp_intr *int_ctx, + hal_ring_handle_t hal_ring_hdl, + uint8_t reo_ring_num, uint32_t quota); /* Control Arch Ops */ QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc, struct dp_vdev *vdev, diff --git a/dp/wifi3.0/li/dp_li_rx.c b/dp/wifi3.0/li/dp_li_rx.c new file mode 100644 index 0000000000..afa2495b01 --- /dev/null +++ b/dp/wifi3.0/li/dp_li_rx.c @@ -0,0 +1,781 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "cdp_txrx_cmn_struct.h" +#include "hal_hw_headers.h" +#include "dp_types.h" +#include "dp_rx.h" +#include "dp_li_rx.h" +#include "dp_peer.h" +#include "hal_rx.h" +#include "hal_li_rx.h" +#include "hal_api.h" +#include "hal_li_api.h" +#include "qdf_nbuf.h" +#ifdef MESH_MODE_SUPPORT +#include "if_meta_hdr.h" +#endif +#include "dp_internal.h" +#include "dp_rx_mon.h" +#include "dp_ipa.h" +#ifdef FEATURE_WDS +#include "dp_txrx_wds.h" +#endif +#include "dp_hist.h" +#include "dp_rx_buffer_pool.h" + +static inline +bool is_sa_da_idx_valid(struct dp_soc *soc, uint8_t *rx_tlv_hdr, + qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info) +{ + if ((qdf_nbuf_is_sa_valid(nbuf) && + (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) || + (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) && + (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) + return false; + + return true; +} + +/** + * dp_rx_process_li() - Brain of the Rx processing functionality + * Called from the bottom half (tasklet/NET_RX_SOFTIRQ) + * @int_ctx: per interrupt context + * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced + * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring. + * @quota: No. of units (packets) that can be serviced in one shot. + * + * This function implements the core of Rx functionality. This is + * expected to handle only non-error frames. + * + * Return: uint32_t: No. 
of elements processed + */ +uint32_t dp_rx_process_li(struct dp_intr *int_ctx, + hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num, + uint32_t quota) +{ + hal_ring_desc_t ring_desc; + hal_soc_handle_t hal_soc; + struct dp_rx_desc *rx_desc = NULL; + qdf_nbuf_t nbuf, next; + bool near_full; + union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT]; + union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT]; + uint32_t num_pending; + uint32_t rx_bufs_used = 0, rx_buf_cookie; + uint16_t msdu_len = 0; + uint16_t peer_id; + uint8_t vdev_id; + struct dp_peer *peer; + struct dp_vdev *vdev; + uint32_t pkt_len = 0; + struct hal_rx_mpdu_desc_info mpdu_desc_info; + struct hal_rx_msdu_desc_info msdu_desc_info; + enum hal_reo_error_status error; + uint32_t peer_mdata; + uint8_t *rx_tlv_hdr; + uint32_t rx_bufs_reaped[MAX_PDEV_CNT]; + uint8_t mac_id = 0; + struct dp_pdev *rx_pdev; + struct dp_srng *dp_rxdma_srng; + struct rx_desc_pool *rx_desc_pool; + struct dp_soc *soc = int_ctx->soc; + uint8_t core_id = 0; + struct cdp_tid_rx_stats *tid_stats; + qdf_nbuf_t nbuf_head; + qdf_nbuf_t nbuf_tail; + qdf_nbuf_t deliver_list_head; + qdf_nbuf_t deliver_list_tail; + uint32_t num_rx_bufs_reaped = 0; + uint32_t intr_id; + struct hif_opaque_softc *scn; + int32_t tid = 0; + bool is_prev_msdu_last = true; + uint32_t num_entries_avail = 0; + uint32_t rx_ol_pkt_cnt = 0; + uint32_t num_entries = 0; + struct hal_rx_msdu_metadata msdu_metadata; + QDF_STATUS status; + qdf_nbuf_t ebuf_head; + qdf_nbuf_t ebuf_tail; + uint8_t pkt_capture_offload = 0; + + DP_HIST_INIT(); + + qdf_assert_always(soc && hal_ring_hdl); + hal_soc = soc->hal_soc; + qdf_assert_always(hal_soc); + + scn = soc->hif_handle; + hif_pm_runtime_mark_dp_rx_busy(scn); + intr_id = int_ctx->dp_intr_id; + num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl); + +more_data: + /* reset local variables here to be re-used in the function */ + nbuf_head = NULL; + nbuf_tail = NULL; + deliver_list_head = NULL; + deliver_list_tail = NULL; + peer = NULL; + vdev = NULL; + num_rx_bufs_reaped = 0; + ebuf_head = NULL; + ebuf_tail = NULL; + + qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped)); + qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info)); + qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info)); + qdf_mem_zero(head, sizeof(head)); + qdf_mem_zero(tail, sizeof(tail)); + + if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) { + /* + * Need API to convert from hal_ring pointer to + * Ring Type / Ring Id combo + */ + DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1); + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + FL("HAL RING Access Failed -- %pK"), hal_ring_hdl); + goto done; + } + + /* + * start reaping the buffers from reo ring and queue + * them in per vdev queue. + * Process the received pkts in a different per vdev loop. 
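+	 * (The first loop below only reaps buffers from the REO ring,
+	 * unmaps them and strings them on a local nbuf list; the actual
+	 * per-packet handling and per-vdev delivery happen in the second
+	 * loop after the "done" label.)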
+ */ + while (qdf_likely(quota && + (ring_desc = hal_srng_dst_peek(hal_soc, + hal_ring_hdl)))) { + error = HAL_RX_ERROR_STATUS_GET(ring_desc); + if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) { + dp_rx_err("%pK: HAL RING 0x%pK:error %d", + soc, hal_ring_hdl, error); + DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num], + 1); + /* Don't know how to deal with this -- assert */ + qdf_assert(0); + } + + dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc); + rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc); + status = dp_rx_cookie_check_and_invalidate(ring_desc); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + DP_STATS_INC(soc, rx.err.stale_cookie, 1); + break; + } + + rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie); + status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl, + ring_desc, rx_desc); + if (QDF_IS_STATUS_ERROR(status)) { + if (qdf_unlikely(rx_desc && rx_desc->nbuf)) { + qdf_assert_always(rx_desc->unmapped); + dp_ipa_reo_ctx_buf_mapping_lock(soc, + reo_ring_num); + dp_ipa_handle_rx_buf_smmu_mapping( + soc, + rx_desc->nbuf, + RX_DATA_BUFFER_SIZE, + false); + qdf_nbuf_unmap_nbytes_single( + soc->osdev, + rx_desc->nbuf, + QDF_DMA_FROM_DEVICE, + RX_DATA_BUFFER_SIZE); + rx_desc->unmapped = 1; + dp_ipa_reo_ctx_buf_mapping_unlock(soc, + reo_ring_num); + dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, + rx_desc->pool_id); + dp_rx_add_to_free_desc_list( + &head[rx_desc->pool_id], + &tail[rx_desc->pool_id], + rx_desc); + } + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + /* + * this is a unlikely scenario where the host is reaping + * a descriptor which it already reaped just a while ago + * but is yet to replenish it back to HW. + * In this case host will dump the last 128 descriptors + * including the software descriptor rx_desc and assert. 
+ */ + + if (qdf_unlikely(!rx_desc->in_use)) { + DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1); + dp_info_rl("Reaping rx_desc not in use!"); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + /* ignore duplicate RX desc and continue to process */ + /* Pop out the descriptor */ + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc); + if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) { + DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1); + dp_info_rl("Nbuf sanity check failure!"); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + rx_desc->in_err_state = 1; + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + continue; + } + + if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) { + dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie); + DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1); + dp_rx_dump_info_and_assert(soc, hal_ring_hdl, + ring_desc, rx_desc); + } + + /* Get MPDU DESC info */ + hal_rx_mpdu_desc_info_get_li(ring_desc, &mpdu_desc_info); + + /* Get MSDU DESC info */ + hal_rx_msdu_desc_info_get(hal_soc, ring_desc, &msdu_desc_info); + + if (qdf_unlikely(msdu_desc_info.msdu_flags & + HAL_MSDU_F_MSDU_CONTINUATION)) { + /* previous msdu has end bit set, so current one is + * the new MPDU + */ + if (is_prev_msdu_last) { + /* Get number of entries available in HW ring */ + num_entries_avail = + hal_srng_dst_num_valid(hal_soc, + hal_ring_hdl, 1); + + /* For new MPDU check if we can read complete + * MPDU by comparing the number of buffers + * available and number of buffers needed to + * reap this MPDU + */ + if ((msdu_desc_info.msdu_len / + (RX_DATA_BUFFER_SIZE - + soc->rx_pkt_tlv_size) + 1) > + num_entries_avail) { + DP_STATS_INC(soc, + rx.msdu_scatter_wait_break, + 1); + dp_rx_cookie_reset_invalid_bit( + ring_desc); + break; + } + is_prev_msdu_last = false; + } + } + + core_id = smp_processor_id(); + DP_STATS_INC(soc, rx.ring_packets[core_id][reo_ring_num], 1); + + if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT) + qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1); + + if (qdf_unlikely(mpdu_desc_info.mpdu_flags & + HAL_MPDU_F_RAW_AMPDU)) + qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1); + + if (!is_prev_msdu_last && + msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) + is_prev_msdu_last = true; + + /* Pop out the descriptor*/ + hal_srng_dst_get_next(hal_soc, hal_ring_hdl); + + rx_bufs_reaped[rx_desc->pool_id]++; + peer_mdata = mpdu_desc_info.peer_meta_data; + QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) = + DP_PEER_METADATA_PEER_ID_GET(peer_mdata); + QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) = + DP_PEER_METADATA_VDEV_ID_GET(peer_mdata); + + /* to indicate whether this msdu is rx offload */ + pkt_capture_offload = + DP_PEER_METADATA_OFFLOAD_GET(peer_mdata); + + /* + * save msdu flags first, last and continuation msdu in + * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and + * length to nbuf->cb. This ensures the info required for + * per pkt processing is always in the same cache line. + * This helps in improving throughput for smaller pkt + * sizes. 
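+		 *
+		 * (Illustrative summary: the first/last/continuation flags,
+		 * da_mcbc, da_valid, sa_valid, the TID and the msdu length
+		 * set below all land in qdf_nbuf_cb, so the per-packet loop
+		 * later on does not have to re-read the ring descriptor.)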
+ */ + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) + qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU) + qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC) + qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID) + qdf_nbuf_set_da_valid(rx_desc->nbuf, 1); + + if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID) + qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1); + + qdf_nbuf_set_tid_val(rx_desc->nbuf, + HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc)); + qdf_nbuf_set_rx_reo_dest_ind( + rx_desc->nbuf, + HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc)); + + QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len; + + QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num; + + /* + * move unmap after scattered msdu waiting break logic + * in case double skb unmap happened. + */ + rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id]; + dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num); + dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf, + rx_desc_pool->buf_size, + false); + qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf, + QDF_DMA_FROM_DEVICE, + rx_desc_pool->buf_size); + rx_desc->unmapped = 1; + dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num); + DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head, + ebuf_tail, rx_desc); + /* + * if continuation bit is set then we have MSDU spread + * across multiple buffers, let us not decrement quota + * till we reap all buffers of that MSDU. + */ + if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) + quota -= 1; + + dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id], + &tail[rx_desc->pool_id], rx_desc); + num_rx_bufs_reaped++; + /* + * only if complete msdu is received for scatter case, + * then allow break. + */ + if (is_prev_msdu_last && + dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped)) + break; + } +done: + dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl); + + for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) { + /* + * continue with next mac_id if no pkts were reaped + * from that pool + */ + if (!rx_bufs_reaped[mac_id]) + continue; + + dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id]; + + rx_desc_pool = &soc->rx_desc_buf[mac_id]; + + dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, + rx_desc_pool, rx_bufs_reaped[mac_id], + &head[mac_id], &tail[mac_id]); + } + + dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]); + /* Peer can be NULL is case of LFR */ + if (qdf_likely(peer)) + vdev = NULL; + + /* + * BIG loop where each nbuf is dequeued from global queue, + * processed and queued back on a per vdev basis. These nbufs + * are sent to stack as and when we run out of nbufs + * or a new nbuf dequeued from global queue has a different + * vdev when compared to previous nbuf. 
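+	 *
+	 * For example (illustrative only): if the reaped list is
+	 * n1(vdev A), n2(vdev A), n3(vdev B), then n1 and n2 are batched
+	 * on deliver_list and flushed to the stack once n3 is seen to
+	 * belong to a different vdev/peer; n3 then starts a new list.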
+ */ + nbuf = nbuf_head; + while (nbuf) { + next = nbuf->next; + if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) { + nbuf = next; + DP_STATS_INC(soc, rx.err.raw_frm_drop, 1); + continue; + } + + rx_tlv_hdr = qdf_nbuf_data(nbuf); + vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf); + peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf); + + if (dp_rx_is_list_ready(deliver_list_head, vdev, peer, + peer_id, vdev_id)) { + dp_rx_deliver_to_stack(soc, vdev, peer, + deliver_list_head, + deliver_list_tail); + deliver_list_head = NULL; + deliver_list_tail = NULL; + } + + /* Get TID from struct cb->tid_val, save to tid */ + if (qdf_nbuf_is_rx_chfrag_start(nbuf)) + tid = qdf_nbuf_get_tid_val(nbuf); + + if (qdf_unlikely(!peer)) { + peer = dp_peer_get_ref_by_id(soc, peer_id, + DP_MOD_ID_RX); + } else if (peer && peer->peer_id != peer_id) { + dp_peer_unref_delete(peer, DP_MOD_ID_RX); + peer = dp_peer_get_ref_by_id(soc, peer_id, + DP_MOD_ID_RX); + } + + if (peer) { + QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false; + qdf_dp_trace_set_track(nbuf, QDF_RX); + QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1; + QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) = + QDF_NBUF_RX_PKT_DATA_TRACK; + } + + rx_bufs_used++; + + if (qdf_likely(peer)) { + vdev = peer->vdev; + } else { + nbuf->next = NULL; + dp_rx_deliver_to_pkt_capture_no_peer( + soc, nbuf, pkt_capture_offload); + if (!pkt_capture_offload) + dp_rx_deliver_to_stack_no_peer(soc, nbuf); + nbuf = next; + continue; + } + + if (qdf_unlikely(!vdev)) { + qdf_nbuf_free(nbuf); + nbuf = next; + DP_STATS_INC(soc, rx.err.invalid_vdev, 1); + continue; + } + + /* when hlos tid override is enabled, save tid in + * skb->priority + */ + if (qdf_unlikely(vdev->skip_sw_tid_classification & + DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) + qdf_nbuf_set_priority(nbuf, tid); + + rx_pdev = vdev->pdev; + DP_RX_TID_SAVE(nbuf, tid); + if (qdf_unlikely(rx_pdev->delay_stats_flag) || + qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled( + soc->wlan_cfg_ctx))) + qdf_nbuf_set_timestamp(nbuf); + + tid_stats = + &rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid]; + + /* + * Check if DMA completed -- msdu_done is the last bit + * to be written + */ + if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) && + !hal_rx_attn_msdu_done_get_li(rx_tlv_hdr))) { + dp_err("MSDU DONE failure"); + DP_STATS_INC(soc, rx.err.msdu_done_fail, 1); + hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr, + QDF_TRACE_LEVEL_INFO); + tid_stats->fail_cnt[MSDU_DONE_FAILURE]++; + qdf_nbuf_free(nbuf); + qdf_assert(0); + nbuf = next; + continue; + } + + DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id); + /* + * First IF condition: + * 802.11 Fragmented pkts are reinjected to REO + * HW block as SG pkts and for these pkts we only + * need to pull the RX TLVS header length. + * Second IF condition: + * The below condition happens when an MSDU is spread + * across multiple buffers. This can happen in two cases + * 1. The nbuf size is smaller then the received msdu. + * ex: we have set the nbuf size to 2048 during + * nbuf_alloc. but we received an msdu which is + * 2304 bytes in size then this msdu is spread + * across 2 nbufs. + * + * 2. AMSDUs when RAW mode is enabled. + * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread + * across 1st nbuf and 2nd nbuf and last MSDU is + * spread across 2nd nbuf and 3rd nbuf. + * + * for these scenarios let us create a skb frag_list and + * append these buffers till the last MSDU of the AMSDU + * Third condition: + * This is the most likely case, we receive 802.3 pkts + * decapsulated by HW, here we need to set the pkt length. 
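+		 *
+		 * As a worked example of the third case (numbers are
+		 * illustrative only): for a 1400 byte MSDU with a 2 byte
+		 * l3_hdr_pad, pktlen is set to
+		 * 1400 + 2 + soc->rx_pkt_tlv_size, and dp_rx_skip_tlvs()
+		 * then pulls the TLV area and pad off the head before the
+		 * frame is processed further.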
+		 */
+		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
+		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
+			bool is_mcbc, is_sa_vld, is_da_vld;
+
+			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
+								 rx_tlv_hdr);
+			is_sa_vld =
+				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
+								rx_tlv_hdr);
+			is_da_vld =
+				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
+								rx_tlv_hdr);
+
+			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
+			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
+			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
+
+			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
+		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
+			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
+			nbuf = dp_rx_sg_create(soc, nbuf);
+			next = nbuf->next;
+
+			if (qdf_nbuf_is_raw_frame(nbuf)) {
+				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
+				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
+			} else {
+				qdf_nbuf_free(nbuf);
+				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
+				dp_info_rl("scatter msdu len %d, dropped",
+					   msdu_len);
+				nbuf = next;
+				continue;
+			}
+		} else {
+			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
+			pkt_len = msdu_len +
+				  msdu_metadata.l3_hdr_pad +
+				  soc->rx_pkt_tlv_size;
+
+			qdf_nbuf_set_pktlen(nbuf, pkt_len);
+			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
+		}
+
+		/*
+		 * process frame for multipass phase processing
+		 */
+		if (qdf_unlikely(vdev->multipass_en)) {
+			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
+				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
+				qdf_nbuf_free(nbuf);
+				nbuf = next;
+				continue;
+			}
+		}
+
+		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
+			dp_rx_err("%pK: Policy Check Drop pkt", soc);
+			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
+			/* Drop & free packet */
+			qdf_nbuf_free(nbuf);
+			/* Statistics */
+			nbuf = next;
+			continue;
+		}
+
+		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
+				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
+				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
+								rx_tlv_hdr) ==
+				  false))) {
+			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
+			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
+			qdf_nbuf_free(nbuf);
+			nbuf = next;
+			continue;
+		}
+
+		/*
+		 * Drop non-EAPOL frames from unauthorized peer.
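+		 * EAPOL and WAPI frames are still allowed through (the
+		 * is_eapol check below) so that the key handshake can
+		 * complete before the peer is marked authorized.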
+ */ + if (qdf_likely(peer) && qdf_unlikely(!peer->authorize)) { + bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) || + qdf_nbuf_is_ipv4_wapi_pkt(nbuf); + + if (!is_eapol) { + DP_STATS_INC(soc, + rx.err.peer_unauth_rx_pkt_drop, + 1); + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + } + + if (soc->process_rx_status) + dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr); + + /* Update the protocol tag in SKB based on CCE metadata */ + dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr, + reo_ring_num, false, true); + + /* Update the flow tag in SKB based on FSE metadata */ + dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true); + + dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, + reo_ring_num, tid_stats); + + if (qdf_unlikely(vdev->mesh_vdev)) { + if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) + == QDF_STATUS_SUCCESS) { + dp_rx_info("%pK: mesh pkt filtered", soc); + tid_stats->fail_cnt[MESH_FILTER_DROP]++; + DP_STATS_INC(vdev->pdev, dropped.mesh_filter, + 1); + + qdf_nbuf_free(nbuf); + nbuf = next; + continue; + } + dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer); + } + + if (qdf_likely(vdev->rx_decap_type == + htt_cmn_pkt_type_ethernet) && + qdf_likely(!vdev->mesh_vdev)) { + /* WDS Destination Address Learning */ + dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf); + + /* Due to HW issue, sometimes we see that the sa_idx + * and da_idx are invalid with sa_valid and da_valid + * bits set + * + * in this case we also see that value of + * sa_sw_peer_id is set as 0 + * + * Drop the packet if sa_idx and da_idx OOB or + * sa_sw_peerid is 0 + */ + if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf, + msdu_metadata)) { + qdf_nbuf_free(nbuf); + nbuf = next; + DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1); + continue; + } + /* WDS Source Port Learning */ + if (qdf_likely(vdev->wds_enabled)) + dp_rx_wds_srcport_learn(soc, + rx_tlv_hdr, + peer, + nbuf, + msdu_metadata); + + /* Intrabss-fwd */ + if (dp_rx_check_ap_bridge(vdev)) + if (DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, + nbuf, msdu_metadata)) { + nbuf = next; + tid_stats->intrabss_cnt++; + continue; /* Get next desc */ + } + } + + dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt); + + dp_rx_update_stats(soc, nbuf); + DP_RX_LIST_APPEND(deliver_list_head, + deliver_list_tail, + nbuf); + DP_STATS_INC_PKT(peer, rx.to_stack, 1, + QDF_NBUF_CB_RX_PKT_LEN(nbuf)); + if (qdf_unlikely(peer->in_twt)) + DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1, + QDF_NBUF_CB_RX_PKT_LEN(nbuf)); + + tid_stats->delivered_to_stack++; + nbuf = next; + } + + if (qdf_likely(deliver_list_head)) { + if (qdf_likely(peer)) { + dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id, + pkt_capture_offload, + deliver_list_head); + if (!pkt_capture_offload) + dp_rx_deliver_to_stack(soc, vdev, peer, + deliver_list_head, + deliver_list_tail); + } else { + nbuf = deliver_list_head; + while (nbuf) { + next = nbuf->next; + nbuf->next = NULL; + dp_rx_deliver_to_stack_no_peer(soc, nbuf); + nbuf = next; + } + } + } + + if (qdf_likely(peer)) + dp_peer_unref_delete(peer, DP_MOD_ID_RX); + + if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) { + if (quota) { + num_pending = + dp_rx_srng_get_num_pending(hal_soc, + hal_ring_hdl, + num_entries, + &near_full); + if (num_pending) { + DP_STATS_INC(soc, rx.hp_oos2, 1); + + if (!hif_exec_should_yield(scn, intr_id)) + goto more_data; + + if (qdf_unlikely(near_full)) { + DP_STATS_INC(soc, rx.near_full, 1); + goto more_data; + } + } + } + + if (vdev && vdev->osif_fisa_flush) + vdev->osif_fisa_flush(soc, reo_ring_num); + + if (vdev 
&& vdev->osif_gro_flush && rx_ol_pkt_cnt) { + vdev->osif_gro_flush(vdev->osif_vdev, + reo_ring_num); + } + } + + /* Update histogram statistics by looping through pdev's */ + DP_RX_HIST_STATS_PER_PDEV(); + + return rx_bufs_used; /* Assume no scale factor for now */ +} diff --git a/dp/wifi3.0/li/dp_li_rx.h b/dp/wifi3.0/li/dp_li_rx.h new file mode 100644 index 0000000000..47f6d006b4 --- /dev/null +++ b/dp/wifi3.0/li/dp_li_rx.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _DP_LI_RX_H_ +#define _DP_LI_RX_H_ + +#include +#include "dp_li.h" + +uint32_t dp_rx_process_li(struct dp_intr *int_ctx, + hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num, + uint32_t quota); +#endif diff --git a/hal/wifi3.0/li/hal_li_generic_api.c b/hal/wifi3.0/li/hal_li_generic_api.c index d764b722e7..5530ff8393 100644 --- a/hal/wifi3.0/li/hal_li_generic_api.c +++ b/hal/wifi3.0/li/hal_li_generic_api.c @@ -465,23 +465,6 @@ static uint16_t hal_rx_attn_phy_ppdu_id_get_li(uint8_t *buf) return phy_ppdu_id; } -/** - * hal_rx_attn_msdu_done_get_li() - Get msdi done flag from RX TLV - * @buf: RX tlv address - * - * Return: msdu done flag - */ -static uint32_t hal_rx_attn_msdu_done_get_li(uint8_t *buf) -{ - struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf; - struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn; - uint32_t msdu_done; - - msdu_done = HAL_RX_ATTN_MSDU_DONE_GET(rx_attn); - - return msdu_done; -} - /** * hal_rx_msdu_start_msdu_len_get(): API to get the MSDU length * from rx_msdu_start TLV @@ -936,26 +919,6 @@ hal_rx_tlv_csum_err_get_li(uint8_t *rx_tlv_hdr, uint32_t *ip_csum_err, *tcp_udp_csum_err = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr); } -void hal_rx_mpdu_desc_info_get_li(void *desc_addr, - void *mpdu_desc_info_hdl) -{ - struct reo_destination_ring *reo_dst_ring; - struct hal_rx_mpdu_desc_info *mpdu_desc_info = - (struct hal_rx_mpdu_desc_info *)mpdu_desc_info_hdl; - uint32_t *mpdu_info; - - reo_dst_ring = (struct reo_destination_ring *)desc_addr; - - mpdu_info = (uint32_t *)&reo_dst_ring->rx_mpdu_desc_info_details; - - mpdu_desc_info->msdu_count = HAL_RX_MPDU_MSDU_COUNT_GET(mpdu_info); - mpdu_desc_info->mpdu_seq = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_info); - mpdu_desc_info->mpdu_flags = HAL_RX_MPDU_FLAGS_GET(mpdu_info); - mpdu_desc_info->peer_meta_data = - HAL_RX_MPDU_DESC_PEER_META_DATA_GET(mpdu_info); - mpdu_desc_info->bar_frame = HAL_RX_MPDU_BAR_FRAME_GET(mpdu_info); -} - static uint8_t hal_rx_err_status_get_li(hal_ring_desc_t rx_desc) { return HAL_RX_ERROR_STATUS_GET(rx_desc); diff --git a/hal/wifi3.0/li/hal_li_rx.h b/hal/wifi3.0/li/hal_li_rx.h index ec2f1b4650..748d2276ec 100644 --- a/hal/wifi3.0/li/hal_li_rx.h +++ b/hal/wifi3.0/li/hal_li_rx.h @@ -1218,6 +1218,44 @@ uint8_t 
hal_rx_get_rx_more_frag_bit(uint8_t *buf)
 	return frame_ctrl;
 }
 
+static inline
+void hal_rx_mpdu_desc_info_get_li(void *desc_addr,
+				  void *mpdu_desc_info_hdl)
+{
+	struct reo_destination_ring *reo_dst_ring;
+	struct hal_rx_mpdu_desc_info *mpdu_desc_info =
+		(struct hal_rx_mpdu_desc_info *)mpdu_desc_info_hdl;
+	uint32_t *mpdu_info;
+
+	reo_dst_ring = (struct reo_destination_ring *)desc_addr;
+
+	mpdu_info = (uint32_t *)&reo_dst_ring->rx_mpdu_desc_info_details;
+
+	mpdu_desc_info->msdu_count = HAL_RX_MPDU_MSDU_COUNT_GET(mpdu_info);
+	mpdu_desc_info->mpdu_seq = HAL_RX_MPDU_SEQUENCE_NUMBER_GET(mpdu_info);
+	mpdu_desc_info->mpdu_flags = HAL_RX_MPDU_FLAGS_GET(mpdu_info);
+	mpdu_desc_info->peer_meta_data =
+		HAL_RX_MPDU_DESC_PEER_META_DATA_GET(mpdu_info);
+	mpdu_desc_info->bar_frame = HAL_RX_MPDU_BAR_FRAME_GET(mpdu_info);
+}
+
+/**
+ * hal_rx_attn_msdu_done_get_li() - Get msdu done flag from RX TLV
+ * @buf: RX tlv address
+ *
+ * Return: msdu done flag
+ */
+static inline uint32_t hal_rx_attn_msdu_done_get_li(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
+	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
+	uint32_t msdu_done;
+
+	msdu_done = HAL_RX_ATTN_MSDU_DONE_GET(rx_attn);
+
+	return msdu_done;
+}
+
 #define HAL_RX_MSDU_START_NSS_GET(_rx_msdu_start) \
 	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start), \
 		RX_MSDU_START_5_NSS_OFFSET)), \