- /*
- * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
- /*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
- /* OS abstraction libraries */
- #include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
- #include <cdf_atomic.h> /* cdf_atomic_read, etc. */
- #include <cdf_util.h> /* cdf_unlikely */
- /* APIs for other modules */
- #include <htt.h> /* HTT_TX_EXT_TID_MGMT */
- #include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
- #include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
- #include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
- /* internal header files relevant for all systems */
- #include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
- #include <ol_txrx_types.h> /* pdev stats */
- #include <ol_tx_desc.h> /* ol_tx_desc */
- #include <ol_tx_send.h> /* ol_tx_send */
- #include <ol_txrx.h>
- /* internal header files relevant only for HL systems */
- #include <ol_tx_queue.h> /* ol_tx_enqueue */
- /* internal header files relevant only for specific systems (Pronto) */
- #include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
- #include <ol_tx.h>
- #ifdef WLAN_FEATURE_FASTPATH
- #include <hif.h> /* HIF_DEVICE */
- #include <htc_api.h> /* Layering violation, but required for fast path */
- #include <htt_internal.h>
- #include <htt_types.h> /* htc_endpoint */
- int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
- unsigned int num_msdus, unsigned int transfer_id);
- #endif /* WLAN_FEATURE_FASTPATH */
- /*
- * The TXRX module doesn't accept tx frames unless the target has
- * enough descriptors for them.
- * For LL, the TXRX descriptor pool is sized to match the target's
- * descriptor pool. Hence, if the descriptor allocation in TXRX
- * succeeds, that guarantees that the target has room to accept
- * the new tx frame.
- */
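- /*
- * Note: on descriptor allocation failure, ol_tx_prepare_ll executes a
- * "return" from the function that invokes the macro, handing the list
- * of unaccepted MSDUs back to that function's caller.
- */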
- #define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
- do { \
- struct ol_txrx_pdev_t *pdev = vdev->pdev; \
- (msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
- tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
- if (cdf_unlikely(!tx_desc)) { \
- TXRX_STATS_MSDU_LIST_INCR( \
- pdev, tx.dropped.host_reject, msdu); \
- return msdu; /* the list of unaccepted MSDUs */ \
- } \
- } while (0)
- #define ol_tx_prepare_tso(vdev, msdu, msdu_info) \
- do { \
- msdu_info.tso_info.curr_seg = NULL; \
- if (cdf_nbuf_is_tso(msdu)) { \
- int num_seg = cdf_nbuf_get_tso_num_seg(msdu); \
- msdu_info.tso_info.tso_seg_list = NULL; \
- msdu_info.tso_info.num_segs = num_seg; \
- while (num_seg) { \
- struct cdf_tso_seg_elem_t *tso_seg = \
- ol_tso_alloc_segment(vdev->pdev); \
- if (tso_seg) { \
- tso_seg->next = \
- msdu_info.tso_info.tso_seg_list; \
- msdu_info.tso_info.tso_seg_list \
- = tso_seg; \
- num_seg--; \
- } else {\
- cdf_print("TSO seg alloc failed!\n"); \
- } \
- } \
- cdf_nbuf_get_tso_info(vdev->pdev->osdev, \
- msdu, &msdu_info.tso_info); \
- msdu_info.tso_info.curr_seg = \
- msdu_info.tso_info.tso_seg_list; \
- num_seg = msdu_info.tso_info.num_segs; \
- } else { \
- msdu_info.tso_info.is_tso = 0; \
- msdu_info.tso_info.num_segs = 1; \
- } \
- } while (0)
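- /*
- * Note: if ol_tso_alloc_segment() fails, the loop in ol_tx_prepare_tso
- * retries the same segment, so it effectively spins until a segment
- * element becomes available again (presumably freed back to the pool
- * by a tx completion elsewhere).
- */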
- /**
- * ol_tx_send_data_frame() - send data frame
- * @sta_id: sta id
- * @skb: skb
- * @proto_type: proto type
- *
- * Return: NULL on success; the skb on failure
- */
- cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
- uint8_t proto_type)
- {
- void *cdf_ctx = cds_get_context(CDF_MODULE_ID_CDF_DEVICE);
- struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
- struct ol_txrx_peer_t *peer;
- cdf_nbuf_t ret;
- CDF_STATUS status;
- if (cdf_unlikely(!pdev)) {
- CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
- "%s:pdev is null", __func__);
- return skb;
- }
- if (cdf_unlikely(!cdf_ctx)) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "%s:cdf_ctx is null", __func__);
- return skb;
- }
- if (sta_id >= WLAN_MAX_STA_COUNT) {
- CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
- "%s:Invalid sta id", __func__);
- return skb;
- }
- peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
- if (!peer) {
- CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
- "%s:Invalid peer", __func__);
- return skb;
- }
- if (peer->state < ol_txrx_peer_state_conn) {
- CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
- "%s: station to be yet registered..dropping pkt", __func__);
- return skb;
- }
- status = cdf_nbuf_map_single(cdf_ctx, skb, CDF_DMA_TO_DEVICE);
- if (cdf_unlikely(status != CDF_STATUS_SUCCESS)) {
- CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
- "%s: nbuf map failed", __func__);
- return skb;
- }
- cdf_nbuf_trace_set_proto_type(skb, proto_type);
- if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
- && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
- && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
- cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
- /* Terminate the (single-element) list of tx frames */
- cdf_nbuf_set_next(skb, NULL);
- ret = OL_TX_LL(peer->vdev, skb);
- if (ret) {
- CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
- "%s: Failed to tx", __func__);
- cdf_nbuf_unmap_single(cdf_ctx, ret, CDF_DMA_TO_DEVICE);
- return ret;
- }
- return NULL;
- }
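- /*
- * Note: a non-NULL return from ol_tx_send_data_frame means the frame
- * was not consumed; ownership stays with the caller, which is then
- * responsible for freeing the skb.
- */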
- #ifdef IPA_OFFLOAD
- /**
- * ol_tx_send_ipa_data_frame() - send IPA data frame
- * @vdev: vdev
- * @skb: skb
- *
- * Return: NULL on success; the skb on failure
- */
- cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
- cdf_nbuf_t skb)
- {
- ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
- cdf_nbuf_t ret;
- if (cdf_unlikely(!pdev)) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "%s: pdev is NULL", __func__);
- return skb;
- }
- if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
- && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
- && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
- cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
- /* Terminate the (single-element) list of tx frames */
- cdf_nbuf_set_next(skb, NULL);
- ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
- if (ret) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
- "%s: Failed to tx", __func__);
- return ret;
- }
- return NULL;
- }
- #endif
- #if defined(FEATURE_TSO)
- cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
- {
- cdf_nbuf_t msdu = msdu_list;
- struct ol_txrx_msdu_info_t msdu_info;
- msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
- msdu_info.htt.action.tx_comp_req = 0;
- /*
- * The msdu_list variable could be used instead of the msdu var,
- * but just to clarify which operations are done on a single MSDU
- * vs. a list of MSDUs, use a distinct variable for single MSDUs
- * within the list.
- */
- while (msdu) {
- cdf_nbuf_t next;
- struct ol_tx_desc_t *tx_desc;
- int segments = 1;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
- msdu_info.peer = NULL;
- ol_tx_prepare_tso(vdev, msdu, msdu_info);
- segments = msdu_info.tso_info.num_segs;
- /*
- * The netbuf may get linked into a different list inside the
- * ol_tx_send function, so store the next pointer before the
- * tx_send call.
- */
- next = cdf_nbuf_next(msdu);
- /* init the current segment to the 1st segment in the list */
- while (segments) {
- if (msdu_info.tso_info.curr_seg)
- NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
- curr_seg->seg.tso_frags[0].paddr_low_32;
- segments--;
- /*
- * If this is a jumbo nbuf, increment the number of nbuf
- * users for each additional segment of the msdu, so the
- * skb is freed only after tx completion has been received
- * for all segments of the nbuf.
- */
- if (segments)
- cdf_nbuf_inc_users(msdu);
- ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
- /*
- * If debug display is enabled, show the meta-data being
- * downloaded to the target via the HTT tx descriptor.
- */
- htt_tx_desc_display(tx_desc->htt_tx_desc);
- ol_tx_send(vdev->pdev, tx_desc, msdu);
- if (msdu_info.tso_info.curr_seg) {
- msdu_info.tso_info.curr_seg =
- msdu_info.tso_info.curr_seg->next;
- }
- cdf_nbuf_dec_num_frags(msdu);
- if (msdu_info.tso_info.is_tso) {
- TXRX_STATS_TSO_INC_SEG(vdev->pdev);
- TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
- }
- } /* while segments */
- msdu = next;
- if (msdu_info.tso_info.is_tso) {
- TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
- TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
- }
- } /* while msdus */
- return NULL; /* all MSDUs were accepted */
- }
- #else /* TSO */
- cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
- {
- cdf_nbuf_t msdu = msdu_list;
- struct ol_txrx_msdu_info_t msdu_info;
- msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
- msdu_info.htt.action.tx_comp_req = 0;
- msdu_info.tso_info.is_tso = 0;
- /*
- * The msdu_list variable could be used instead of the msdu var,
- * but just to clarify which operations are done on a single MSDU
- * vs. a list of MSDUs, use a distinct variable for single MSDUs
- * within the list.
- */
- while (msdu) {
- cdf_nbuf_t next;
- struct ol_tx_desc_t *tx_desc;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
- msdu_info.peer = NULL;
- ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
- /*
- * If debug display is enabled, show the meta-data being
- * downloaded to the target via the HTT tx descriptor.
- */
- htt_tx_desc_display(tx_desc->htt_tx_desc);
- /*
- * The netbuf may get linked into a different list inside the
- * ol_tx_send function, so store the next pointer before the
- * tx_send call.
- */
- next = cdf_nbuf_next(msdu);
- ol_tx_send(vdev->pdev, tx_desc, msdu);
- msdu = next;
- }
- return NULL; /* all MSDUs were accepted */
- }
- #endif /* TSO */
- #ifdef WLAN_FEATURE_FASTPATH
- /**
- * ol_tx_prepare_ll_fast() - alloc and prepare a tx descriptor
- *
- * Allocate and prepare a tx descriptor with msdu and fragment descriptor
- * information.
- *
- * @pdev: pointer to ol pdev handle
- * @vdev: pointer to ol vdev handle
- * @msdu: linked list of msdu packets
- * @pkt_download_len: packet download length
- * @ep_id: endpoint ID
- * @msdu_info: Handle to msdu_info
- *
- * Return: Pointer to Tx descriptor
- */
- static inline struct ol_tx_desc_t *
- ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
- ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu,
- uint32_t pkt_download_len, uint32_t ep_id,
- struct ol_txrx_msdu_info_t *msdu_info)
- {
- struct ol_tx_desc_t *tx_desc = NULL;
- uint32_t *htt_tx_desc;
- void *htc_hdr_vaddr;
- u_int32_t num_frags, i;
- tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
- if (cdf_unlikely(!tx_desc))
- return NULL;
- tx_desc->netbuf = msdu;
- if (msdu_info->tso_info.is_tso) {
- tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
- tx_desc->pkt_type = ol_tx_frm_tso;
- TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
- } else {
- tx_desc->pkt_type = ol_tx_frm_std;
- }
- htt_tx_desc = tx_desc->htt_tx_desc;
- /*
- * Make sure the frags count is set to 0. Do this here rather
- * than in hardstart, so that we hopefully take only one
- * cache miss while accessing skb->cb.
- */
- /* HTT Header */
- /* TODO : Take care of multiple fragments */
- /* TODO: Precompute and store paddr in ol_tx_desc_t */
- /* Virtual address of the HTT/HTC header, added by driver */
- htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
- htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
- tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
- &msdu_info->htt, &msdu_info->tso_info,
- NULL, vdev->opmode == wlan_op_mode_ocb);
- num_frags = cdf_nbuf_get_num_frags(msdu);
- /* num_frags is expected to be at most 2 */
- num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS) ?
- CVG_NBUF_MAX_EXTRA_FRAGS : num_frags;
- #if defined(HELIUMPLUS_PADDR64)
- /*
- * Use num_frags - 1, since 1 frag is used to store
- * the HTT/HTC descriptor
- * Refer to htt_tx_desc_init()
- */
- htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
- num_frags - 1);
- #else /* !defined(HELIUMPLUS_PADDR64) */
- htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
- num_frags-1);
- #endif /* defined(HELIUMPLUS_PADDR64) */
- if (msdu_info->tso_info.is_tso) {
- htt_tx_desc_fill_tso_info(pdev->htt_pdev,
- tx_desc->htt_frag_desc, &msdu_info->tso_info);
- TXRX_STATS_TSO_SEG_UPDATE(pdev,
- msdu_info->tso_info.curr_seg->seg);
- } else {
- for (i = 1; i < num_frags; i++) {
- cdf_size_t frag_len;
- u_int32_t frag_paddr;
- frag_len = cdf_nbuf_get_frag_len(msdu, i);
- frag_paddr = cdf_nbuf_get_frag_paddr_lo(msdu, i);
- #if defined(HELIUMPLUS_PADDR64)
- htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
- i - 1, frag_paddr, frag_len);
- #if defined(HELIUMPLUS_DEBUG)
- cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
- __func__, __LINE__, tx_desc->htt_frag_desc,
- frag_paddr, frag_len);
- dump_pkt(msdu, frag_paddr, 64);
- #endif /* HELIUMPLUS_DEBUG */
- #else /* !defined(HELIUMPLUS_PADDR64) */
- htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
- i - 1, frag_paddr, frag_len);
- #endif /* defined(HELIUMPLUS_PADDR64) */
- }
- }
- /*
- * Do we want to turn on the word_stream bit-map here? For Linux,
- * non-TSO, this is not required. We still have to mark the swap bit
- * correctly when posting to the ring.
- */
- /* Check to make sure the data download length is correct */
- /*
- * TODO: Can we remove this check and always download a fixed length?
- */
- if (cdf_unlikely(cdf_nbuf_len(msdu) < pkt_download_len))
- pkt_download_len = cdf_nbuf_len(msdu);
- /* Fill the HTC header information */
- /*
- * Pass 0 as the seq_no field; we can probably get away with
- * this for the time being, since it is not checked in f/w.
- */
- /* TODO : Prefill this, look at multi-fragment case */
- HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);
- return tx_desc;
- }
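- /*
- * Note: the HTC header is written in place into the HTC_HEADER_LEN
- * bytes immediately preceding the HTT tx descriptor (htc_hdr_vaddr
- * above), so no separate header buffer is needed for the download.
- */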
- #if defined(FEATURE_TSO)
- /**
- * ol_tx_ll_fast() - update metadata information and send msdus to HIF/CE
- *
- * @vdev: handle to ol_txrx_vdev_t
- * @msdu_list: msdu list to be sent out.
- *
- * Return: NULL on success; pointer to the nbuf that failed to send otherwise.
- */
- cdf_nbuf_t
- ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
- {
- cdf_nbuf_t msdu = msdu_list;
- struct ol_txrx_pdev_t *pdev = vdev->pdev;
- uint32_t pkt_download_len =
- ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
- uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
- struct ol_txrx_msdu_info_t msdu_info;
- msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
- msdu_info.htt.action.tx_comp_req = 0;
- /*
- * The msdu_list variable could be used instead of the msdu var,
- * but just to clarify which operations are done on a single MSDU
- * vs. a list of MSDUs, use a distinct variable for single MSDUs
- * within the list.
- */
- while (msdu) {
- cdf_nbuf_t next;
- struct ol_tx_desc_t *tx_desc;
- int segments = 1;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
- msdu_info.peer = NULL;
- ol_tx_prepare_tso(vdev, msdu, msdu_info);
- segments = msdu_info.tso_info.num_segs;
- /*
- * The netbuf may get linked into a different list
- * inside the ce_send_fast function, so store the next
- * pointer before the ce_send call.
- */
- next = cdf_nbuf_next(msdu);
- /* init the current segment to the 1st segment in the list */
- while (segments) {
- if (msdu_info.tso_info.curr_seg)
- NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
- curr_seg->seg.tso_frags[0].paddr_low_32;
- segments--;
- /*
- * If this is a jumbo nbuf, increment the number of nbuf
- * users for each additional segment of the msdu, so the
- * skb is freed only after tx completion has been received
- * for all segments of the nbuf.
- */
- if (segments)
- cdf_nbuf_inc_users(msdu);
- msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
- msdu_info.htt.info.vdev_id = vdev->vdev_id;
- msdu_info.htt.action.cksum_offload =
- cdf_nbuf_get_tx_cksum(msdu);
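- /*
- * The exemption type, set by the OS network stack (typically
- * for 802.1X frames sent before keys are installed), decides
- * whether the target encrypts the frame.
- */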
- switch (cdf_nbuf_get_exemption_type(msdu)) {
- case CDF_NBUF_EXEMPT_NO_EXEMPTION:
- case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
- /* We want to encrypt this frame */
- msdu_info.htt.action.do_encrypt = 1;
- break;
- case CDF_NBUF_EXEMPT_ALWAYS:
- /* We don't want to encrypt this frame */
- msdu_info.htt.action.do_encrypt = 0;
- break;
- default:
- msdu_info.htt.action.do_encrypt = 1;
- cdf_assert(0);
- break;
- }
- tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
- pkt_download_len, ep_id,
- &msdu_info);
- if (cdf_likely(tx_desc)) {
- /*
- * If debug display is enabled, show the meta
- * data being downloaded to the target via the
- * HTT tx descriptor.
- */
- htt_tx_desc_display(tx_desc->htt_tx_desc);
- if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
- 1, ep_id))) {
- /*
- * The packet could not be sent.
- * Free the descriptor, return the
- * packet to the caller.
- */
- ol_tx_desc_free(pdev, tx_desc);
- return msdu;
- }
- if (msdu_info.tso_info.curr_seg) {
- msdu_info.tso_info.curr_seg =
- msdu_info.tso_info.curr_seg->next;
- }
- if (msdu_info.tso_info.is_tso) {
- cdf_nbuf_dec_num_frags(msdu);
- TXRX_STATS_TSO_INC_SEG(vdev->pdev);
- TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
- }
- } else {
- TXRX_STATS_MSDU_LIST_INCR(
- pdev, tx.dropped.host_reject, msdu);
- /* the list of unaccepted MSDUs */
- return msdu;
- }
- } /* while segments */
- msdu = next;
- if (msdu_info.tso_info.is_tso) {
- TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
- TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
- }
- } /* while msdus */
- return NULL; /* all MSDUs were accepted */
- }
- #else
- cdf_nbuf_t
- ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
- {
- cdf_nbuf_t msdu = msdu_list;
- struct ol_txrx_pdev_t *pdev = vdev->pdev;
- uint32_t pkt_download_len =
- ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
- uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
- struct ol_txrx_msdu_info_t msdu_info;
- msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
- msdu_info.htt.action.tx_comp_req = 0;
- msdu_info.tso_info.is_tso = 0;
- /*
- * The msdu_list variable could be used instead of the msdu var,
- * but just to clarify which operations are done on a single MSDU
- * vs. a list of MSDUs, use a distinct variable for single MSDUs
- * within the list.
- */
- while (msdu) {
- cdf_nbuf_t next;
- struct ol_tx_desc_t *tx_desc;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
- msdu_info.peer = NULL;
- msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
- msdu_info.htt.info.vdev_id = vdev->vdev_id;
- msdu_info.htt.action.cksum_offload =
- cdf_nbuf_get_tx_cksum(msdu);
- switch (cdf_nbuf_get_exemption_type(msdu)) {
- case CDF_NBUF_EXEMPT_NO_EXEMPTION:
- case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
- /* We want to encrypt this frame */
- msdu_info.htt.action.do_encrypt = 1;
- break;
- case CDF_NBUF_EXEMPT_ALWAYS:
- /* We don't want to encrypt this frame */
- msdu_info.htt.action.do_encrypt = 0;
- break;
- default:
- msdu_info.htt.action.do_encrypt = 1;
- cdf_assert(0);
- break;
- }
- tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
- pkt_download_len, ep_id,
- &msdu_info);
- if (cdf_likely(tx_desc)) {
- /*
- * If debug display is enabled, show the meta-data being
- * downloaded to the target via the HTT tx descriptor.
- */
- htt_tx_desc_display(tx_desc->htt_tx_desc);
- /*
- * The netbuf may get linked into a different list
- * inside the ce_send_fast function, so store the next
- * pointer before the ce_send call.
- */
- next = cdf_nbuf_next(msdu);
- if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
- ep_id))) {
- /*
- * The packet could not be sent. Free the
- * descriptor and return the packet to the caller.
- */
- ol_tx_desc_free(pdev, tx_desc);
- return msdu;
- }
- msdu = next;
- } else {
- TXRX_STATS_MSDU_LIST_INCR(
- pdev, tx.dropped.host_reject, msdu);
- return msdu; /* the list of unaccepted MSDUs */
- }
- }
- return NULL; /* all MSDUs were accepted */
- }
- #endif /* FEATURE_TSO */
- #endif /* WLAN_FEATURE_FASTPATH */
- #ifdef WLAN_FEATURE_FASTPATH
- /**
- * ol_tx_ll_wrapper() - wrapper that selects ol_tx_ll_fast or ol_tx_ll
- * @vdev: vdev handle
- * @msdu_list: msdu list to be sent out
- *
- * Return: NULL on success; list of unaccepted MSDUs otherwise
- */
- static inline cdf_nbuf_t
- ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
- {
- struct ol_softc *hif_device =
- (struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
- if (cdf_likely(hif_device && hif_device->fastpath_mode_on))
- msdu_list = ol_tx_ll_fast(vdev, msdu_list);
- else
- msdu_list = ol_tx_ll(vdev, msdu_list);
- return msdu_list;
- }
- #else
- static inline cdf_nbuf_t
- ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
- {
- return ol_tx_ll(vdev, msdu_list);
- }
- #endif /* WLAN_FEATURE_FASTPATH */
- #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
- #define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
- #define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
- static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
- {
- int max_to_accept;
- cdf_spin_lock_bh(&vdev->ll_pause.mutex);
- if (vdev->ll_pause.paused_reason) {
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- return;
- }
- /*
- * Send as much of the backlog as possible, but leave some margin
- * of unallocated tx descriptors that can be used for new frames
- * being transmitted by other vdevs.
- * Ideally there would be a scheduler, which would not only leave
- * some margin for new frames for other vdevs, but also would
- * fairly apportion the tx descriptors between multiple vdevs that
- * have backlogs in their pause queues.
- * However, the fairness benefit of having a scheduler for frames
- * from multiple vdev's pause queues is not sufficient to outweigh
- * the extra complexity.
- */
- max_to_accept = vdev->pdev->tx_desc.num_free -
- OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
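- /*
- * Example: with 1000 free tx descriptors and the margin of 400,
- * at most 600 backlogged frames are dequeued in this invocation.
- */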
- while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
- cdf_nbuf_t tx_msdu;
- max_to_accept--;
- vdev->ll_pause.txq.depth--;
- tx_msdu = vdev->ll_pause.txq.head;
- if (tx_msdu) {
- vdev->ll_pause.txq.head = cdf_nbuf_next(tx_msdu);
- if (NULL == vdev->ll_pause.txq.head)
- vdev->ll_pause.txq.tail = NULL;
- cdf_nbuf_set_next(tx_msdu, NULL);
- NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
- NBUF_TX_PKT_TXRX_DEQUEUE);
- tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
- /*
- * It is unexpected that ol_tx_ll would reject the frame
- * since we checked that there's room for it, though
- * there's an infinitesimal possibility that between the
- * time we checked the room available and now, a
- * concurrent batch of tx frames used up all the room.
- * For simplicity, just drop the frame.
- */
- if (tx_msdu) {
- cdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
- CDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(tx_msdu, NBUF_PKT_ERROR);
- }
- }
- }
- if (vdev->ll_pause.txq.depth) {
- cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
- cdf_softirq_timer_start(&vdev->ll_pause.timer,
- OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
- vdev->ll_pause.is_q_timer_on = true;
- if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
- vdev->ll_pause.q_overflow_cnt++;
- }
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- }
- static cdf_nbuf_t
- ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu_list, uint8_t start_timer)
- {
- cdf_spin_lock_bh(&vdev->ll_pause.mutex);
- while (msdu_list &&
- vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
- cdf_nbuf_t next = cdf_nbuf_next(msdu_list);
- NBUF_UPDATE_TX_PKT_COUNT(msdu_list, NBUF_TX_PKT_TXRX_ENQUEUE);
- DPTRACE(cdf_dp_trace(msdu_list,
- CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
- (uint8_t *)(cdf_nbuf_data(msdu_list)),
- sizeof(cdf_nbuf_data(msdu_list))));
- vdev->ll_pause.txq.depth++;
- if (!vdev->ll_pause.txq.head) {
- vdev->ll_pause.txq.head = msdu_list;
- vdev->ll_pause.txq.tail = msdu_list;
- } else {
- cdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
- }
- vdev->ll_pause.txq.tail = msdu_list;
- msdu_list = next;
- }
- if (vdev->ll_pause.txq.tail)
- cdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);
- if (start_timer) {
- cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
- cdf_softirq_timer_start(&vdev->ll_pause.timer,
- OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
- vdev->ll_pause.is_q_timer_on = true;
- }
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- return msdu_list;
- }
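- /*
- * Note: any frames that do not fit within max_q_depth are returned to
- * the caller as the remainder of the msdu list, rather than being
- * dropped here.
- */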
- /*
- * Store up the tx frame in the vdev's tx queue if the vdev is paused.
- * If there are too many frames in the tx queue, reject it.
- */
- cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
- {
- uint16_t eth_type;
- uint32_t paused_reason;
- if (msdu_list == NULL)
- return NULL;
- paused_reason = vdev->ll_pause.paused_reason;
- if (paused_reason) {
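- /*
- * If the pause is only because the peer is not yet authorized,
- * still send EAPOL/WAPI frames immediately so the security
- * handshake can complete.
- */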
- if (cdf_unlikely((paused_reason &
- OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
- paused_reason)) {
- eth_type = (((struct ethernet_hdr_t *)
- cdf_nbuf_data(msdu_list))->
- ethertype[0] << 8) |
- (((struct ethernet_hdr_t *)
- cdf_nbuf_data(msdu_list))->ethertype[1]);
- if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
- msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
- return msdu_list;
- }
- }
- msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
- } else {
- if (vdev->ll_pause.txq.depth > 0 ||
- vdev->pdev->tx_throttle.current_throttle_level !=
- THROTTLE_LEVEL_0) {
- /* not paused, but there is a backlog of frames
- from a prior pause or throttle-off phase */
- msdu_list = ol_tx_vdev_pause_queue_append(
- vdev, msdu_list, 0);
- /* if throttle is disabled or phase is "on",
- send the frame */
- if (vdev->pdev->tx_throttle.current_throttle_level ==
- THROTTLE_LEVEL_0 ||
- vdev->pdev->tx_throttle.current_throttle_phase ==
- THROTTLE_PHASE_ON) {
- /* send as many frames as possible
- from the vdevs backlog */
- ol_tx_vdev_ll_pause_queue_send_base(vdev);
- }
- } else {
- /* not paused, no throttle and no backlog -
- send the new frames */
- msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
- }
- }
- return msdu_list;
- }
- /*
- * Run through the transmit queues for all the vdevs and
- * send the pending frames
- */
- void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
- {
- int max_to_send; /* limit on the number of frames to send in this call */
- cdf_nbuf_t tx_msdu;
- struct ol_txrx_vdev_t *vdev = NULL;
- uint8_t more;
- if (NULL == pdev)
- return;
- if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
- return;
- /* ensure that we send no more than tx_threshold frames at once */
- max_to_send = pdev->tx_throttle.tx_threshold;
- /* round-robin through the vdev queues for the given pdev */
- /*
- * Potential improvement: download several frames from the same vdev
- * at a time, since it is more likely that those frames could be
- * aggregated together. Also, remember which vdev was serviced last,
- * so the next call to this function can resume the round-robin
- * traversal where the current invocation left off.
- */
- do {
- more = 0;
- TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
- cdf_spin_lock_bh(&vdev->ll_pause.mutex);
- if (vdev->ll_pause.txq.depth) {
- if (vdev->ll_pause.paused_reason) {
- cdf_spin_unlock_bh(&vdev->ll_pause.
- mutex);
- continue;
- }
- tx_msdu = vdev->ll_pause.txq.head;
- if (NULL == tx_msdu) {
- cdf_spin_unlock_bh(&vdev->ll_pause.
- mutex);
- continue;
- }
- max_to_send--;
- vdev->ll_pause.txq.depth--;
- vdev->ll_pause.txq.head =
- cdf_nbuf_next(tx_msdu);
- if (NULL == vdev->ll_pause.txq.head)
- vdev->ll_pause.txq.tail = NULL;
- cdf_nbuf_set_next(tx_msdu, NULL);
- tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
- /*
- * It is unexpected that ol_tx_ll would reject
- * the frame, since we checked that there's
- * room for it, though there's an infinitesimal
- * possibility that between the time we checked
- * the room available and now, a concurrent
- * batch of tx frames used up all the room.
- * For simplicity, just drop the frame.
- */
- if (tx_msdu) {
- cdf_nbuf_unmap(pdev->osdev, tx_msdu,
- CDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(tx_msdu,
- NBUF_PKT_ERROR);
- }
- }
- /* check if there are more msdus to transmit */
- if (vdev->ll_pause.txq.depth)
- more = 1;
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- }
- } while (more && max_to_send);
- vdev = NULL;
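- /*
- * If any vdev still has a backlog, restart the throttle timer so the
- * remaining frames are sent in a later invocation.
- */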
- TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
- cdf_spin_lock_bh(&vdev->ll_pause.mutex);
- if (vdev->ll_pause.txq.depth) {
- cdf_softirq_timer_cancel(&pdev->tx_throttle.tx_timer);
- cdf_softirq_timer_start(
- &pdev->tx_throttle.tx_timer,
- OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- return;
- }
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- }
- }
- void ol_tx_vdev_ll_pause_queue_send(void *context)
- {
- struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
- struct ol_txrx_pdev_t *pdev = vdev->pdev;
- if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
- pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
- return;
- ol_tx_vdev_ll_pause_queue_send_base(vdev);
- }
- #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
- static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
- {
- return
- tx_spec &
- (ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
- }
- static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
- {
- uint8_t sub_type = 0x1; /* 802.11 MAC header present */
- if (tx_spec & ol_tx_spec_no_aggr)
- sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
- if (tx_spec & ol_tx_spec_no_encrypt)
- sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
- if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
- sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
- return sub_type;
- }
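- /*
- * Example: tx_spec = ol_tx_spec_raw | ol_tx_spec_no_encrypt yields a
- * sub_type with bit 0 set (802.11 MAC header present) and the
- * HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S bit set.
- */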
- cdf_nbuf_t
- ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
- enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
- {
- cdf_nbuf_t msdu = msdu_list;
- htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
- struct ol_txrx_msdu_info_t msdu_info;
- msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
- msdu_info.htt.action.tx_comp_req = 0;
- /*
- * The msdu_list variable could be used instead of the msdu var,
- * but just to clarify which operations are done on a single MSDU
- * vs. a list of MSDUs, use a distinct variable for single MSDUs
- * within the list.
- */
- while (msdu) {
- cdf_nbuf_t next;
- struct ol_tx_desc_t *tx_desc;
- msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
- msdu_info.peer = NULL;
- msdu_info.tso_info.is_tso = 0;
- ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
- /*
- * The netbuf may get linked into a different list inside the
- * ol_tx_send function, so store the next pointer before the
- * tx_send call.
- */
- next = cdf_nbuf_next(msdu);
- if (tx_spec != ol_tx_spec_std) {
- if (tx_spec & ol_tx_spec_no_free) {
- tx_desc->pkt_type = ol_tx_frm_no_free;
- } else if (tx_spec & ol_tx_spec_tso) {
- tx_desc->pkt_type = ol_tx_frm_tso;
- } else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) {
- uint8_t sub_type =
- ol_txrx_tx_raw_subtype(tx_spec);
- htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
- htt_pkt_type_native_wifi,
- sub_type);
- } else if (ol_txrx_tx_is_raw(tx_spec)) {
- /* different types of raw frames */
- uint8_t sub_type =
- ol_txrx_tx_raw_subtype(tx_spec);
- htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
- htt_pkt_type_raw, sub_type);
- }
- }
- /*
- * If debug display is enabled, show the meta-data being
- * downloaded to the target via the HTT tx descriptor.
- */
- htt_tx_desc_display(tx_desc->htt_tx_desc);
- ol_tx_send(vdev->pdev, tx_desc, msdu);
- msdu = next;
- }
- return NULL; /* all MSDUs were accepted */
- }
- #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
- #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
- do { \
- if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
- cdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
- ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
- if (tx_msdu_info.peer) { \
- /* remove the peer reference added above */ \
- ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
- } \
- goto MSDU_LOOP_BOTTOM; \
- } \
- } while (0)
- #else
- #define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
- #endif
- /* tx filtering is handled within the target FW */
- #define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
- #define OCB_HEADER_VERSION 1
- /**
- * parse_ocb_tx_header() - check for an OCB TX control header on a
- * packet and extract it if present
- * @msdu: Pointer to OS packet (cdf_nbuf_t)
- * @tx_ctrl: Pointer to a buffer the control header is copied into
- *
- * Return: false if a control header is present but its version is
- * invalid; true otherwise
- */
- bool parse_ocb_tx_header(cdf_nbuf_t msdu,
- struct ocb_tx_ctrl_hdr_t *tx_ctrl)
- {
- struct ether_header *eth_hdr_p;
- struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;
- /* Check if TX control header is present */
- eth_hdr_p = (struct ether_header *) cdf_nbuf_data(msdu);
- if (eth_hdr_p->ether_type != CDF_SWAP_U16(ETHERTYPE_OCB_TX))
- /* TX control header is not present. Nothing to do. */
- return true;
- /* Remove the ethernet header */
- cdf_nbuf_pull_head(msdu, sizeof(struct ether_header));
- /* Parse the TX control header */
- tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *) cdf_nbuf_data(msdu);
- if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
- if (tx_ctrl)
- cdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
- sizeof(*tx_ctrl_hdr));
- } else {
- /* The TX control header is invalid. */
- return false;
- }
- /* Remove the TX control header */
- cdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
- return true;
- }
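- /*
- * Expected layout of an OCB frame handled above:
- * [ether header, ether_type == ETHERTYPE_OCB_TX]
- * [ocb_tx_ctrl_hdr_t][payload]
- * Both headers are stripped from the netbuf before returning.
- */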
- cdf_nbuf_t
- ol_tx_non_std(ol_txrx_vdev_handle vdev,
- enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
- {
- return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
- }
- void
- ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
- ol_txrx_data_tx_cb callback, void *ctxt)
- {
- struct ol_txrx_pdev_t *pdev = vdev->pdev;
- pdev->tx_data_callback.func = callback;
- pdev->tx_data_callback.ctxt = ctxt;
- }
- void
- ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
- uint8_t type,
- ol_txrx_mgmt_tx_cb download_cb,
- ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
- {
- TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
- pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
- pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
- pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
- }
- #if defined(HELIUMPLUS_PADDR64)
- void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
- {
- uint32_t *frag_ptr_i_p;
- int i;
- cdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
- tx_desc, tx_desc->id);
- cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%x\n",
- tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
- cdf_print("%s %d: Fragment Descriptor 0x%p\n",
- __func__, __LINE__, tx_desc->htt_frag_desc);
- /* judging from htt_tx_desc_frag(), tx_desc->htt_frag_desc is
- already dereferenceable (i.e., a virtual address) */
- frag_ptr_i_p = tx_desc->htt_frag_desc;
- /* Dump 6 words of TSO flags */
- print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
- DUMP_PREFIX_NONE, 8, 4,
- frag_ptr_i_p, 24, true);
- frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
- i = 0;
- while (*frag_ptr_i_p) {
- print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
- DUMP_PREFIX_NONE, 8, 4,
- frag_ptr_i_p, 8, true);
- i++;
- if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
- break;
- else /* jump to next pointer - skip length */
- frag_ptr_i_p += 2;
- }
- return;
- }
- #endif /* HELIUMPLUS_PADDR64 */
- int
- ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
- cdf_nbuf_t tx_mgmt_frm,
- uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
- {
- struct ol_txrx_pdev_t *pdev = vdev->pdev;
- struct ol_tx_desc_t *tx_desc;
- struct ol_txrx_msdu_info_t tx_msdu_info;
- tx_msdu_info.tso_info.is_tso = 0;
- tx_msdu_info.htt.action.use_6mbps = use_6mbps;
- tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
- tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
- tx_msdu_info.htt.action.do_tx_complete =
- pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;
- /*
- * FIX THIS: l2_hdr_type should only specify L2 header type
- * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
- * that is a combination of L2 header type and 802.11 frame type.
- * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
- * But if the 802.11 frame type is "data", then the HTT pkt type is
- * the L2 header type (more or less): 802.3 vs. Native WiFi
- * (basic 802.11).
- * (Or the header type can be "raw", which is any version of the 802.11
- * header, and also implies that some of the offloaded tx data
- * processing steps may not apply.)
- * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
- * l2_hdr_type field to program the HTT pkt type. Thus, this txrx SW
- * needs to overload the l2_hdr_type to indicate whether the frame is
- * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
- * To fix this, the msdu_info's l2_hdr_type should be left specifying
- * just the L2 header type. For mgmt frames, there should be a
- * separate function to patch the HTT pkt type to store a "mgmt" value
- * rather than the L2 header type. Then the HTT pkt type can be
- * programmed efficiently for data frames, and the msdu_info's
- * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
- * frame type rather than the L2 header type.
- */
- /*
- * FIX THIS: remove duplication of htt_frm_type_mgmt and
- * htt_pkt_type_mgmt
- * The htt module expects a "enum htt_pkt_type" value.
- * The htt_dxe module expects a "enum htt_frm_type" value.
- * This needs to be cleaned up, so both versions of htt use a
- * consistent method of specifying the frame type.
- */
- #ifdef QCA_SUPPORT_INTEGRATED_SOC
- /* tx mgmt frames always come with an 802.11 header */
- tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
- tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
- #else
- tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
- tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
- #endif
- tx_msdu_info.peer = NULL;
- cdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, CDF_DMA_TO_DEVICE);
- /* For LL, tx_comp_req is not used, so initialize it to 0 */
- tx_msdu_info.htt.action.tx_comp_req = 0;
- tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
- /* FIX THIS -
- * The FW currently has trouble using the host's fragments table
- * for management frames. Until this is fixed, rather than
- * specifying the fragment table to the FW, specify just the
- * address of the initial fragment.
- */
- #if defined(HELIUMPLUS_PADDR64)
- /* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
- tx_desc); */
- #endif /* defined(HELIUMPLUS_PADDR64) */
- if (tx_desc) {
- /*
- * Following the call to ol_tx_desc_ll, frag 0 is the
- * HTT tx HW descriptor, and the frame payload is in
- * frag 1.
- */
- htt_tx_desc_frags_table_set(
- pdev->htt_pdev,
- tx_desc->htt_tx_desc,
- cdf_nbuf_get_frag_paddr_lo(tx_mgmt_frm, 1),
- 0, 0);
- #if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
- dump_frag_desc(
- "after htt_tx_desc_frags_table_set",
- tx_desc);
- #endif /* defined(HELIUMPLUS_PADDR64) */
- }
- if (!tx_desc) {
- cdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
- CDF_DMA_TO_DEVICE);
- return -EINVAL; /* can't accept the tx mgmt frame */
- }
- TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
- TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
- tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
- htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
- NBUF_SET_PACKET_TRACK(tx_desc->netbuf, NBUF_TX_PKT_MGMT_TRACK);
- ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
- htt_pkt_type_mgmt);
- return 0; /* accepted the tx mgmt frame */
- }
- void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
- {
- htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
- }
- cdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
- cdf_nbuf_t msdu, uint16_t peer_id)
- {
- struct ol_tx_desc_t *tx_desc;
- struct ol_txrx_msdu_info_t msdu_info;
- msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
- msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
- msdu_info.peer = NULL;
- msdu_info.htt.action.tx_comp_req = 0;
- msdu_info.tso_info.is_tso = 0;
- ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
- HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
- htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);
- ol_tx_send(vdev->pdev, tx_desc, msdu);
- return NULL;
- }
- #if defined(FEATURE_TSO)
- void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
- {
- int i;
- struct cdf_tso_seg_elem_t *c_element;
- c_element = cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
- pdev->tso_seg_pool.freelist = c_element;
- for (i = 0; i < (num_seg - 1); i++) {
- /* bail out rather than dereference a failed allocation */
- if (!c_element) {
- cdf_print("TSO seg pool alloc failed after %d segs\n", i);
- break;
- }
- c_element->next =
- cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
- c_element = c_element->next;
- }
- if (c_element)
- c_element->next = NULL;
- pdev->tso_seg_pool.pool_size = num_seg;
- cdf_spinlock_init(&pdev->tso_seg_pool.tso_mutex);
- }
- void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
- {
- int i;
- struct cdf_tso_seg_elem_t *c_element;
- struct cdf_tso_seg_elem_t *temp;
- cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
- c_element = pdev->tso_seg_pool.freelist;
- for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
- temp = c_element->next;
- cdf_mem_free(c_element);
- c_element = temp;
- if (!c_element)
- break;
- }
- pdev->tso_seg_pool.freelist = NULL;
- pdev->tso_seg_pool.num_free = 0;
- pdev->tso_seg_pool.pool_size = 0;
- cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
- cdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
- }
- #endif /* FEATURE_TSO */