- /*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
- #include "dp_types.h"
- #include "hal_rx.h"
- #include "hal_api.h"
- #include "qdf_trace.h"
- #include "qdf_nbuf.h"
- #include "hal_api_mon.h"
- #include "dp_rx.h"
- #include "dp_rx_mon.h"
- #include "dp_internal.h"
- #include "dp_htt.h"
- #include "dp_full_mon.h"
- #include "qdf_mem.h"
- #ifdef QCA_SUPPORT_FULL_MON
- uint32_t
- dp_rx_mon_status_process(struct dp_soc *soc,
- struct dp_intr *int_ctx,
- uint32_t mac_id,
- uint32_t quota);
- /*
- * dp_rx_mon_status_buf_validate () - Validate first monitor status buffer addr
- * against status buf addr given in monitor destination ring
- *
- * @pdev: DP pdev handle
- * @int_ctx: Interrupt context
- * @mac_id: lmac id
- *
- * Return: enum dp_mon_reap_status
- */
- static inline enum dp_mon_reap_status
- dp_rx_mon_status_buf_validate(struct dp_pdev *pdev,
- struct dp_intr *int_ctx,
- uint32_t mac_id)
- {
- struct dp_soc *soc = pdev->soc;
- hal_soc_handle_t hal_soc;
- void *mon_status_srng;
- void *ring_entry;
- uint32_t rx_buf_cookie;
- qdf_nbuf_t status_nbuf;
- struct dp_rx_desc *rx_desc;
- uint64_t buf_paddr;
- struct rx_desc_pool *rx_desc_pool;
- uint32_t tlv_tag;
- void *rx_tlv;
- struct hal_rx_ppdu_info *ppdu_info;
- enum dp_mon_reap_status status = DP_MON_STATUS_MATCH;
- QDF_STATUS buf_status;
- uint32_t ppdu_id_diff;
- mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;
- qdf_assert(mon_status_srng);
- if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
- __func__, __LINE__, mon_status_srng);
- QDF_ASSERT(0);
- return status;
- }
- hal_soc = soc->hal_soc;
- qdf_assert(hal_soc);
- if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- "%s %d : HAL SRNG access Failed -- %pK",
- __func__, __LINE__, mon_status_srng);
- QDF_ASSERT(0);
- return status;
- }
- ring_entry = hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng);
- if (!ring_entry) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- "%s %d : HAL SRNG entry is NULL srng:-- %pK",
- __func__, __LINE__, mon_status_srng);
- status = DP_MON_STATUS_REPLENISH;
- goto done;
- }
- ppdu_info = &pdev->ppdu_info;
- rx_desc_pool = &soc->rx_desc_status[mac_id];
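- /* Assemble the 40-bit status buffer physical address from the
- * lower 32 bits and the upper 8 bits carried in the ring entry
- */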
- buf_paddr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_entry) |
- ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_entry)) << 32));
- if (!buf_paddr) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- "%s %d : buf addr is NULL -- %pK",
- __func__, __LINE__, mon_status_srng);
- status = DP_MON_STATUS_REPLENISH;
- goto done;
- }
- rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_entry);
- rx_desc = dp_rx_cookie_2_va_mon_status(soc, rx_buf_cookie);
- qdf_assert(rx_desc);
- status_nbuf = rx_desc->nbuf;
- qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
- QDF_DMA_FROM_DEVICE);
- rx_tlv = qdf_nbuf_data(status_nbuf);
- buf_status = hal_get_rx_status_done(rx_tlv);
- /* If status buffer DMA is not done,
- * hold on to mon destination ring.
- */
- if (buf_status != QDF_STATUS_SUCCESS) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- FL("Monitor status ring: DMA is not done "
- "for nbuf: %pK buf_addr: %llx"),
- status_nbuf, buf_paddr);
- status = dp_rx_mon_handle_status_buf_done(pdev,
- mon_status_srng);
- if (status == DP_MON_STATUS_REPLENISH) {
- union dp_rx_desc_list_elem_t *desc_list = NULL;
- union dp_rx_desc_list_elem_t *tail = NULL;
- /* If this is DMA not done WAR case, unmap and
- * free buffer and current SW descriptor
- * and make buf_addr_info NULL, so that call to
- * dp_rx_mon_status_process() replenishes entry to
- * status ring
- */
- qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
- QDF_DMA_FROM_DEVICE,
- rx_desc_pool->buf_size);
- qdf_nbuf_free(status_nbuf);
- dp_rx_add_to_free_desc_list(&desc_list,
- &tail, rx_desc);
- dp_rx_add_desc_list_to_free_list(soc, &desc_list,
- &tail, mac_id, rx_desc_pool);
- hal_rxdma_buff_addr_info_set(
- ring_entry,
- 0, 0, HAL_RX_BUF_RBM_SW3_BM);
- }
- goto done;
- }
- qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
- QDF_DMA_FROM_DEVICE,
- rx_desc_pool->buf_size);
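- /* Move past the status-done TLV; if the next TLV is RX_PPDU_START,
- * cache its PHY PPDU id and the status buffer address for comparison
- * against the monitor destination ring descriptor
- */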
- rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);
- tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);
- if (tlv_tag == WIFIRX_PPDU_START_E) {
- rx_tlv = (uint8_t *)rx_tlv + HAL_RX_TLV32_HDR_SIZE;
- pdev->mon_desc->status_ppdu_id =
- HAL_RX_GET(rx_tlv, RX_PPDU_START_0, PHY_PPDU_ID);
- pdev->status_buf_addr = buf_paddr;
- }
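- /* Classify the status ring PPDU id as leading or lagging the
- * destination ring PPDU id; a difference larger than
- * DP_RX_MON_PPDU_ID_WRAP indicates the counter wrapped around,
- * so the lead/lag decision is inverted in that case
- */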
- if (pdev->mon_desc->ppdu_id < pdev->mon_desc->status_ppdu_id) {
- status = DP_MON_STATUS_LEAD;
- /* For wrap around case */
- ppdu_id_diff = pdev->mon_desc->status_ppdu_id -
- pdev->mon_desc->ppdu_id;
- if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
- status = DP_MON_STATUS_LAG;
- } else if (pdev->mon_desc->ppdu_id > pdev->mon_desc->status_ppdu_id) {
- status = DP_MON_STATUS_LAG;
- /* For wrap around case */
- ppdu_id_diff = pdev->mon_desc->ppdu_id -
- pdev->mon_desc->status_ppdu_id;
- if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
- status = DP_MON_STATUS_LEAD;
- }
- if ((pdev->mon_desc->status_buf.paddr != buf_paddr) ||
- (pdev->mon_desc->ppdu_id != pdev->mon_desc->status_ppdu_id)) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- FL("Monitor: PPDU id or status buf_addr mismatch "
- "status_ppdu_id: %d dest_ppdu_id: %d "
- "status_addr: %llx status_buf_cookie: %d "
- "dest_addr: %llx tlv_tag: %d"
- " status_nbuf: %pK pdev->hold_mon_dest: %d"),
- pdev->mon_desc->status_ppdu_id,
- pdev->mon_desc->ppdu_id, pdev->status_buf_addr,
- rx_buf_cookie,
- pdev->mon_desc->status_buf.paddr, tlv_tag,
- status_nbuf, pdev->hold_mon_dest_ring);
- }
- done:
- hal_srng_access_end(hal_soc, mon_status_srng);
- return status;
- }
- /*
- * dp_rx_mon_prepare_mon_mpdu () - API to prepare dp_mon_mpdu object
- *
- * @pdev: DP pdev object
- * @head_msdu: Head msdu
- * @tail_msdu: Tail msdu
- *
- * Return: pointer to the allocated dp_mon_mpdu object
- */
- static inline struct dp_mon_mpdu *
- dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
- qdf_nbuf_t head_msdu,
- qdf_nbuf_t tail_msdu)
- {
- struct dp_mon_mpdu *mon_mpdu = NULL;
- mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));
- if (!mon_mpdu) {
- QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
- FL("Monitor MPDU object allocation failed -- %pK"),
- pdev);
- qdf_assert_always(0);
- }
- mon_mpdu->head = head_msdu;
- mon_mpdu->tail = tail_msdu;
- mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
- mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
- mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
- mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
- mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;
- return mon_mpdu;
- }
- static inline void
- dp_rx_mon_drop_ppdu(struct dp_pdev *pdev, uint32_t mac_id)
- {
- struct dp_mon_mpdu *mpdu = NULL;
- struct dp_mon_mpdu *temp_mpdu = NULL;
- qdf_nbuf_t mon_skb, skb_next;
- if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
- TAILQ_FOREACH_SAFE(mpdu,
- &pdev->mon_mpdu_q,
- mpdu_list_elem,
- temp_mpdu) {
- TAILQ_REMOVE(&pdev->mon_mpdu_q,
- mpdu, mpdu_list_elem);
- mon_skb = mpdu->head;
- while (mon_skb) {
- skb_next = qdf_nbuf_next(mon_skb);
- QDF_TRACE(QDF_MODULE_ID_DP,
- QDF_TRACE_LEVEL_DEBUG,
- "[%s][%d] mon_skb=%pK len %u"
- " __func__, __LINE__",
- mon_skb, mon_skb->len);
- qdf_nbuf_free(mon_skb);
- mon_skb = skb_next;
- }
- qdf_mem_free(mpdu);
- }
- }
- pdev->mon_desc->drop_ppdu = 0;
- }
- /*
- * dp_rx_monitor_deliver_ppdu () - API to deliver all MPDUs of a PPDU
- * to upper layer stack
- *
- * @soc: DP soc handle
- * @pdev: pdev
- * @mac_id: lmac id
- *
- * Return: QDF_STATUS
- */
- static inline QDF_STATUS
- dp_rx_monitor_deliver_ppdu(struct dp_soc *soc,
- struct dp_pdev *pdev,
- uint32_t mac_id)
- {
- struct dp_mon_mpdu *mpdu = NULL;
- struct dp_mon_mpdu *temp_mpdu = NULL;
- if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
- TAILQ_FOREACH_SAFE(mpdu,
- &pdev->mon_mpdu_q,
- mpdu_list_elem,
- temp_mpdu) {
- TAILQ_REMOVE(&pdev->mon_mpdu_q,
- mpdu, mpdu_list_elem);
- /* Check for IEEE80211_AMSDU_FLAG in mpdu
- * and set in pdev->ppdu_info.rx_status
- */
- HAL_RX_SET_MSDU_AGGREGATION(mpdu,
- &(pdev->ppdu_info.rx_status));
- pdev->ppdu_info.rx_status.ant_signal_db =
- mpdu->ant_signal_db;
- pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
- pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
- pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;
- dp_rx_mon_deliver(soc, mac_id,
- mpdu->head, mpdu->tail);
- qdf_mem_free(mpdu);
- }
- }
- return QDF_STATUS_SUCCESS;
- }
- /**
- * dp_rx_mon_reap_status_ring () - Reap status_buf_count status buffers
- * from the status ring.
- *
- * @soc: DP soc handle
- * @pdev: pdev
- * @int_ctx: interrupt context
- * @mac_id: mac id on which interrupt is received
- * @quota: number of status ring entries to be reaped
- * @desc_info: Rx ppdu desc info
- *
- * Return: number of reaped status ring entries
- */
- static inline uint32_t
- dp_rx_mon_reap_status_ring(struct dp_soc *soc,
- struct dp_pdev *pdev,
- struct dp_intr *int_ctx,
- uint32_t mac_id,
- uint32_t quota,
- struct hal_rx_mon_desc_info *desc_info)
- {
- uint8_t status_buf_count;
- uint32_t work_done = 0;
- enum dp_mon_reap_status status;
- status_buf_count = desc_info->status_buf_count;
- desc_info->drop_ppdu = false;
- status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);
- switch (status) {
- case DP_MON_STATUS_NO_DMA:
- /* If DMA is not done for status ring entry,
- * hold on to monitor destination ring and
- * deliver current ppdu data once DMA is done.
- */
- pdev->hold_mon_dest_ring = true;
- break;
- case DP_MON_STATUS_LAG:
- /* If status_ppdu_id is lagging behind destination,
- * a. Hold on to destination ring
- * b. Drop status ppdus until ppdu id matches
- * c. Increment stats for ppdu_id mismatch and
- * status ppdu drop
- */
- pdev->hold_mon_dest_ring = true;
- pdev->rx_mon_stats.ppdu_id_mismatch++;
- pdev->rx_mon_stats.status_ppdu_drop++;
- break;
- case DP_MON_STATUS_LEAD:
- /* If status_ppdu_id is leading ahead of destination,
- * a. Drop destination ring ppdu until ppdu_id matches
- * b. Unhold monitor destination ring so status ppdus
- * can be dropped.
- * c. Increment stats for ppdu_id mismatch and
- * destination ppdu drop
- */
- desc_info->drop_ppdu = true;
- pdev->hold_mon_dest_ring = false;
- pdev->rx_mon_stats.ppdu_id_mismatch++;
- pdev->rx_mon_stats.dest_ppdu_drop++;
- break;
- case DP_MON_STATUS_REPLENISH:
- /* If status ring hp entry is NULL, replenish it */
- work_done = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
- break;
- case DP_MON_STATUS_MATCH:
- /* If status ppdu id matches with destination,
- * unhold monitor destination ring and deliver ppdu
- */
- pdev->hold_mon_dest_ring = false;
- break;
- default:
- dp_err("mon reap status is not supported");
- }
- /* If status ring is lagging behind destination ring,
- * reap only one status buffer
- */
- if (status == DP_MON_STATUS_LAG)
- status_buf_count = 1;
- if (status == DP_MON_STATUS_LAG ||
- status == DP_MON_STATUS_MATCH) {
- work_done = dp_rx_mon_status_process(soc,
- int_ctx,
- mac_id,
- status_buf_count);
- }
- return work_done;
- }
- /**
- * dp_rx_mon_mpdu_reap () - This API reaps an MPDU from the mon dest ring
- * descriptor and returns the link descriptor to HW (WBM)
- *
- * @soc: DP soc handle
- * @pdev: pdev
- * @mac_id: lmac id
- * @ring_desc: SW monitor ring desc
- * @head_msdu: nbuf pointing to first msdu in a chain
- * @tail_msdu: nbuf pointing to last msdu in a chain
- * @head_desc: head pointer to free desc list
- * @tail_desc: tail pointer to free desc list
- *
- * Return: number of reaped buffers
- */
- static inline uint32_t
- dp_rx_mon_mpdu_reap(struct dp_soc *soc, struct dp_pdev *pdev, uint32_t mac_id,
- void *ring_desc, qdf_nbuf_t *head_msdu,
- qdf_nbuf_t *tail_msdu,
- union dp_rx_desc_list_elem_t **head_desc,
- union dp_rx_desc_list_elem_t **tail_desc)
- {
- struct dp_rx_desc *rx_desc = NULL;
- struct hal_rx_msdu_list msdu_list;
- uint32_t rx_buf_reaped = 0;
- uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
- uint32_t total_frag_len = 0, frag_len = 0;
- bool drop_mpdu = false;
- bool msdu_frag = false, is_first_msdu = true, is_frag_non_raw = false;
- void *link_desc_va;
- uint8_t *rx_tlv_hdr;
- qdf_nbuf_t msdu = NULL, last_msdu = NULL;
- uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
- struct hal_rx_mon_desc_info *desc_info;
- uint16_t prev_ppdu_id;
- struct rx_desc_pool *rx_desc_pool = NULL;
- desc_info = pdev->mon_desc;
- /* Restore previous ppdu_id to use it while doing
- * status buffer validation
- */
- prev_ppdu_id = pdev->mon_desc->status_ppdu_id;
- qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));
- pdev->mon_desc->status_ppdu_id = prev_ppdu_id;
- /* Read SW Mon ring descriptor */
- hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
- ring_desc,
- (void *)desc_info);
- /* If end_of_ppdu is 1, return */
- if (desc_info->end_of_ppdu)
- return rx_buf_reaped;
- /* If there is rxdma error, drop mpdu */
- if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
- == QDF_STATUS_SUCCESS)) {
- drop_mpdu = true;
- pdev->rx_mon_stats.dest_mpdu_drop++;
- }
- /*
- * while loop iterates through all link descriptors and
- * reaps msdu_count number of msdus for one SW_MONITOR_RING descriptor
- * and forms nbuf queue.
- */
- while (desc_info->msdu_count && desc_info->link_desc.paddr) {
- link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
- desc_info->link_desc,
- mac_id);
- qdf_assert_always(link_desc_va);
- hal_rx_msdu_list_get(soc->hal_soc,
- link_desc_va,
- &msdu_list,
- &num_msdus);
- for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
- rx_desc = dp_rx_get_mon_desc(soc,
- msdu_list.sw_cookie[msdu_index]);
- qdf_assert_always(rx_desc);
- msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
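- /* Unmap the rx buffer for CPU access if it has not
- * already been unmapped
- */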
- if (rx_desc->unmapped == 0) {
- rx_desc_pool = dp_rx_get_mon_desc_pool(
- soc, mac_id, pdev->pdev_id);
- dp_rx_mon_buffer_unmap(soc, rx_desc,
- rx_desc_pool->buf_size);
- rx_desc->unmapped = 1;
- }
- if (drop_mpdu) {
- dp_rx_mon_buffer_free(rx_desc);
- msdu = NULL;
- /*
- * Don't rely on msdu_count in case of rxdma error;
- * don't decrement msdu_count here
- */
- goto next_msdu;
- }
- rx_tlv_hdr = dp_rx_mon_get_buffer_data(rx_desc);
- if (is_first_msdu) {
- if (dp_rx_mon_alloc_parent_buffer(head_msdu)
- != QDF_STATUS_SUCCESS) {
- DP_STATS_INC(pdev,
- replenish.nbuf_alloc_fail,
- 1);
- qdf_frag_free(rx_tlv_hdr);
- QDF_TRACE(QDF_MODULE_ID_DP,
- QDF_TRACE_LEVEL_DEBUG,
- "[%s] failed to allocate parent buffer to hold all frag",
- __func__);
- drop_mpdu = true;
- desc_info->msdu_count--;
- goto next_msdu;
- }
- is_first_msdu = false;
- }
- if (hal_rx_desc_is_first_msdu(soc->hal_soc,
- rx_tlv_hdr))
- hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
- rx_tlv_hdr,
- &pdev->ppdu_info.rx_status);
- /* If the msdu is fragmented across multiple
- * buffers:
- * a. calculate the length of each fragment buffer
- * b. calculate the number of fragment buffers for
- * the msdu and decrement msdu_count once
- */
- dp_rx_mon_parse_desc_buffer(soc,
- &(msdu_list.msdu_info[msdu_index]),
- &msdu_frag,
- &total_frag_len,
- &frag_len,
- &l3_hdr_pad,
- rx_tlv_hdr,
- &is_frag_non_raw, rx_tlv_hdr);
- if (!msdu_frag)
- desc_info->msdu_count--;
- rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;
- /*
- * HW structures call this L3 header padding;
- * it is actually the offset from the buffer
- * beginning to where the L2 header begins.
- */
- /*******************************************************
- * RX_PACKET *
- * ----------------------------------------------------*
- * | RX_PKT_TLVS | L3 padding | msdu data | *
- * ----------------------------------------------------*
- ******************************************************/
- dp_rx_mon_buffer_set_pktlen(msdu,
- rx_hdr_tlv_len +
- l3_hdr_pad +
- frag_len);
- if (dp_rx_mon_add_msdu_to_list(head_msdu, msdu,
- &last_msdu,
- rx_tlv_hdr, frag_len,
- l3_hdr_pad)
- != QDF_STATUS_SUCCESS) {
- dp_rx_mon_add_msdu_to_list_failure_handler(rx_tlv_hdr,
- pdev, &last_msdu, head_msdu,
- tail_msdu, __func__);
- drop_mpdu = true;
- goto next_msdu;
- }
- next_msdu:
- rx_buf_reaped++;
- dp_rx_add_to_free_desc_list(head_desc,
- tail_desc,
- rx_desc);
- QDF_TRACE(QDF_MODULE_ID_DP,
- QDF_TRACE_LEVEL_DEBUG,
- FL("%s total_len %u frag_len %u flags %u"),
- total_frag_len, frag_len,
- msdu_list.msdu_info[msdu_index].msdu_flags);
- }
- hal_rxdma_buff_addr_info_set(rx_link_buf_info,
- desc_info->link_desc.paddr,
- desc_info->link_desc.sw_cookie,
- desc_info->link_desc.rbm);
- /* Get next link desc VA from current link desc */
- hal_rx_mon_next_link_desc_get(link_desc_va,
- &desc_info->link_desc);
- /* return msdu link descriptor to WBM */
- if (dp_rx_monitor_link_desc_return(pdev,
- (hal_buff_addrinfo_t)rx_link_buf_info,
- mac_id,
- HAL_BM_ACTION_PUT_IN_IDLE_LIST)
- != QDF_STATUS_SUCCESS) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
- "dp_rx_monitor_link_desc_return failed");
- }
- }
- pdev->rx_mon_stats.dest_mpdu_done++;
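- /* Finalize the tail msdu of the reaped chain and strip the FCS
- * length from raw frames before returning to the caller
- */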
- dp_rx_mon_init_tail_msdu(head_msdu, msdu, last_msdu, tail_msdu);
- dp_rx_mon_remove_raw_frame_fcs_len(head_msdu, tail_msdu);
- return rx_buf_reaped;
- }
- /*
- * dp_rx_mon_deliver_prev_ppdu () - Deliver previous PPDU
- *
- * @pdev: DP pdev handle
- * @int_ctx: interrupt context
- * @mac_id: lmac id
- * @quota: quota
- *
- * Return: remaining quota
- */
- static inline uint32_t
- dp_rx_mon_deliver_prev_ppdu(struct dp_pdev *pdev,
- struct dp_intr *int_ctx,
- uint32_t mac_id,
- uint32_t quota)
- {
- struct dp_soc *soc = pdev->soc;
- struct hal_rx_mon_desc_info *desc_info = pdev->mon_desc;
- uint32_t work_done = 0, work = 0;
- bool deliver_ppdu = false;
- enum dp_mon_reap_status status;
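- /* While the monitor destination ring is held from a previous
- * PPDU, keep validating the status ring; return early if the
- * status buffer DMA is not done or a replenish is needed, and
- * deliver the held PPDU once the hold is released
- */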
- while (pdev->hold_mon_dest_ring) {
- status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);
- switch (status) {
- case DP_MON_STATUS_NO_DMA:
- /* If DMA is not done for status ring entry,
- * hold on to monitor destination ring and
- * deliver current ppdu data once DMA is done.
- */
- pdev->hold_mon_dest_ring = true;
- break;
- case DP_MON_STATUS_LAG:
- /* If status_ppdu_id is lagging behind destination,
- * a. Hold on to destination ring
- * b. Drop status ppdus until ppdu id matches
- * c. Increment stats for ppdu_id mismatch and
- * status ppdu drop
- */
- pdev->hold_mon_dest_ring = true;
- pdev->rx_mon_stats.ppdu_id_mismatch++;
- pdev->rx_mon_stats.status_ppdu_drop++;
- break;
- case DP_MON_STATUS_LEAD:
- /* If status_ppdu_id is leading ahead of destination,
- * a. Drop destination ring ppdu until ppdu_id matches
- * b. Unhold monitor destination ring so status ppdus
- * can be dropped.
- * c. Increment stats for ppdu_id mismatch and
- * destination ppdu drop
- */
- desc_info->drop_ppdu = true;
- pdev->hold_mon_dest_ring = false;
- pdev->rx_mon_stats.ppdu_id_mismatch++;
- pdev->rx_mon_stats.dest_ppdu_drop++;
- break;
- case DP_MON_STATUS_REPLENISH:
- /* If status ring hp entry is NULL, replenish it */
- work = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
- break;
- case DP_MON_STATUS_MATCH:
- /* If status ppdu id matches with destination,
- * unhold monitor destination ring and deliver ppdu
- */
- pdev->hold_mon_dest_ring = false;
- break;
- default:
- dp_err("mon reap status is not supported");
- }
- /* When status ring entry's DMA is not done or
- * status ring entry is replenished, ppdu status is not
- * available for radiotap construction, so return and
- * check for status on next interrupt
- */
- if ((status == DP_MON_STATUS_NO_DMA) ||
- (status == DP_MON_STATUS_REPLENISH)) {
- return work_done;
- }
- if (status == DP_MON_STATUS_LAG) {
- work = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
- if (!work)
- return 0;
- work_done += work;
- }
- deliver_ppdu = true;
- }
- if (deliver_ppdu) {
- if (pdev->mon_desc->drop_ppdu) {
- dp_rx_mon_drop_ppdu(pdev, mac_id);
- return work_done;
- }
- work_done += dp_rx_mon_status_process(soc, int_ctx, mac_id,
- desc_info->status_buf_count);
- dp_rx_monitor_deliver_ppdu(soc, pdev, mac_id);
- }
- return work_done;
- }
- /**
- * dp_rx_mon_process () - Core brain processing for monitor mode
- *
- * This API processes monitor destination ring followed by monitor status ring
- * Called from bottom half (tasklet/NET_RX_SOFTIRQ)
- *
- * @soc: datapath soc context
- * @int_ctx: interrupt context
- * @mac_id: mac_id on which interrupt is received
- * @quota: Number of status ring entries that can be serviced in one shot.
- *
- * Return: Number of reaped status ring entries
- */
- uint32_t dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
- uint32_t mac_id, uint32_t quota)
- {
- struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
- union dp_rx_desc_list_elem_t *head_desc = NULL;
- union dp_rx_desc_list_elem_t *tail_desc = NULL;
- uint32_t rx_bufs_reaped = 0;
- struct dp_mon_mpdu *mon_mpdu;
- struct cdp_pdev_mon_stats *rx_mon_stats;
- hal_rxdma_desc_t ring_desc;
- hal_soc_handle_t hal_soc;
- hal_ring_handle_t mon_dest_srng;
- qdf_nbuf_t head_msdu = NULL;
- qdf_nbuf_t tail_msdu = NULL;
- struct hal_rx_mon_desc_info *desc_info;
- int mac_for_pdev = mac_id;
- QDF_STATUS status;
- uint32_t work_done = 0;
- if (!pdev) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- "pdev is null for mac_id = %d", mac_id);
- return work_done;
- }
- qdf_spin_lock_bh(&pdev->mon_lock);
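- /* If full monitor mode is not enabled, process only the
- * monitor status ring and return
- */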
- if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev))) {
- work_done += dp_rx_mon_status_process(soc, int_ctx,
- mac_id, quota);
- qdf_spin_unlock_bh(&pdev->mon_lock);
- return work_done;
- }
- desc_info = pdev->mon_desc;
- rx_mon_stats = &pdev->rx_mon_stats;
- work_done = dp_rx_mon_deliver_prev_ppdu(pdev, int_ctx, mac_id, quota);
- /* Do not proceed if work_done is zero and the dest ring is still held */
- if (!work_done && pdev->hold_mon_dest_ring) {
- qdf_spin_unlock_bh(&pdev->mon_lock);
- return work_done;
- }
- mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);
- if (qdf_unlikely(!mon_dest_srng ||
- !hal_srng_initialized(mon_dest_srng))) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- FL("HAL Monitor Destination Ring Init Failed -- %pK"),
- mon_dest_srng);
- goto done1;
- }
- hal_soc = soc->hal_soc;
- qdf_assert_always(hal_soc && pdev);
- if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dest_srng))) {
- QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
- FL("HAL Monitor Destination Ring access Failed -- %pK"),
- mon_dest_srng);
- goto done1;
- }
- /* Each entry in mon dest ring carries mpdu data
- * reap all msdus for a mpdu and form skb chain
- */
- while (qdf_likely(ring_desc =
- hal_srng_dst_peek(hal_soc, mon_dest_srng))) {
- head_msdu = NULL;
- tail_msdu = NULL;
- rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, pdev, mac_id,
- ring_desc, &head_msdu,
- &tail_msdu, &head_desc,
- &tail_desc);
- /* Log an error if end_of_ppdu is zero and the number of
- * reaped buffers is also zero.
- */
- if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
- qdf_err("end_of_ppdu and rx_bufs_reaped are zero");
- }
- rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;
- /* replenish rx_bufs_reaped buffers back to
- * RxDMA Monitor buffer ring
- */
- if (rx_bufs_reaped) {
- status = dp_rx_buffers_replenish(soc, mac_id,
- dp_rxdma_get_mon_buf_ring(pdev,
- mac_for_pdev),
- dp_rx_get_mon_desc_pool(soc, mac_id,
- pdev->pdev_id),
- rx_bufs_reaped,
- &head_desc, &tail_desc);
- if (status != QDF_STATUS_SUCCESS)
- qdf_assert_always(0);
- rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
- }
- head_desc = NULL;
- tail_desc = NULL;
- /* If end_of_ppdu is zero, it is a valid data mpdu
- * a. Add head_msdu and tail_msdu to mpdu list
- * b. continue reaping next SW_MONITOR_RING descriptor
- */
- if (!desc_info->end_of_ppdu) {
- /*
- * In case of rxdma error, MPDU is dropped
- * from sw_monitor_ring descriptor.
- * in this case, head_msdu remains NULL.
- * move srng to next and continue reaping next entry
- */
- if (!head_msdu) {
- ring_desc = hal_srng_dst_get_next(hal_soc,
- mon_dest_srng);
- continue;
- }
- /*
- * Prepare a MPDU object which holds chain of msdus
- * and MPDU specific status and add this is to
- * monitor mpdu queue
- */
- mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
- head_msdu,
- tail_msdu);
- QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
- FL("Dest_srng: %pK MPDU_OBJ: %pK "
- "head_msdu: %pK tail_msdu: %pK -- "),
- mon_dest_srng,
- mon_mpdu,
- head_msdu,
- tail_msdu);
- TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
- mon_mpdu,
- mpdu_list_elem);
- head_msdu = NULL;
- tail_msdu = NULL;
- ring_desc = hal_srng_dst_get_next(hal_soc,
- mon_dest_srng);
- continue;
- }
- /* It is sometimes observed that ppdu_id, status_buf_addr
- * and link desc addr are NULL; this WAR handles that case
- */
- if (!desc_info->ppdu_id && !desc_info->status_buf.paddr) {
- QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
- FL("ppdu_id: %d ring_entry: %pK"
- "status_buf_count: %d rxdma_push: %d"
- "rxdma_err: %d link_desc: %pK "),
- desc_info->ppdu_id, ring_desc,
- desc_info->status_buf_count,
- desc_info->rxdma_push_reason,
- desc_info->rxdma_error_code,
- desc_info->link_desc.paddr);
- goto next_entry;
- }
- /*
- * end_of_ppdu is one,
- * a. update ppdu_done statistics
- * b. Replenish buffers back to mon buffer ring
- * c. reap status ring for a PPDU and deliver all mpdus
- * to upper layer
- */
- rx_mon_stats->dest_ppdu_done++;
- work_done += dp_rx_mon_reap_status_ring(soc, pdev, int_ctx,
- mac_id, quota, desc_info);
- /* Deliver all MPDUs for a PPDU */
- if (desc_info->drop_ppdu)
- dp_rx_mon_drop_ppdu(pdev, mac_id);
- else if (!pdev->hold_mon_dest_ring)
- dp_rx_monitor_deliver_ppdu(soc, pdev, mac_id);
- next_entry:
- hal_srng_dst_get_next(hal_soc, mon_dest_srng);
- break;
- }
- dp_srng_access_end(int_ctx, soc, mon_dest_srng);
- done1:
- qdf_spin_unlock_bh(&pdev->mon_lock);
- return work_done;
- }
- /**
- * dp_full_mon_attach() - attach full monitor mode
- * resources
- * @pdev: Datapath PDEV handle
- *
- * Return: void
- */
- void dp_full_mon_attach(struct dp_pdev *pdev)
- {
- struct dp_soc *soc = pdev->soc;
- if (!soc->full_mon_mode) {
- qdf_debug("Full monitor is not enabled");
- return;
- }
- pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));
- if (!pdev->mon_desc) {
- qdf_err("Memory allocation failed for hal_rx_mon_desc_info ");
- return;
- }
- TAILQ_INIT(&pdev->mon_mpdu_q);
- }
- /**
- * dp_full_mon_detach() - detach full monitor mode
- * resources
- * @pdev: Datapath PDEV handle
- *
- * Return: void
- *
- */
- void dp_full_mon_detach(struct dp_pdev *pdev)
- {
- struct dp_soc *soc = pdev->soc;
- struct dp_mon_mpdu *mpdu = NULL;
- struct dp_mon_mpdu *temp_mpdu = NULL;
- if (!soc->full_mon_mode) {
- qdf_debug("Full monitor is not enabled");
- return;
- }
- if (pdev->mon_desc)
- qdf_mem_free(pdev->mon_desc);
- if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
- TAILQ_FOREACH_SAFE(mpdu,
- &pdev->mon_mpdu_q,
- mpdu_list_elem,
- temp_mpdu) {
- qdf_mem_free(mpdu);
- }
- }
- }
- #endif