dp_tx.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "htt.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#define DP_TX_GET_DESC_POOL_ID(vdev) qdf_get_cpu()
#define DP_TX_GET_RING_ID(vdev) vdev->pdev->soc->tx_ring_map[qdf_get_cpu()]
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */
/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* Disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* Invalid peer id used for reinject frames */
#define DP_INVALID_PEER 0xFFFE

/* Mapping between HAL encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
    HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
    HAL_TX_ENCRYPT_TYPE_WEP_128,
    HAL_TX_ENCRYPT_TYPE_WEP_104,
    HAL_TX_ENCRYPT_TYPE_WEP_40,
    HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
    HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
    HAL_TX_ENCRYPT_TYPE_WAPI,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
    HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4
};
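
/*
 * sec_type_map is indexed by enum cdp_sec_type (see dp_tx_hw_enqueue(),
 * which looks up sec_type_map[sec_type]). For example, assuming the CDP
 * enum order matches the table above, cdp_sec_type_aes_ccmp maps to
 * HAL_TX_ENCRYPT_TYPE_AES_CCMP_128. The two enums must be kept in
 * lock-step when a new cipher is added.
 */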
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * A TX queue is identified by two IDs: the software descriptor pool id and
 * the DMA ring id. Depending on the TX feature set and hardware
 * configuration, the combination of queue IDs can differ.
 * For example:
 * With XPS enabled, all TX descriptor pools and DMA rings are assigned per
 * CPU id.
 * With no XPS (lock based resource protection), descriptor pool ids are
 * different for each vdev, and the DMA ring id is the single pdev id.
 *
 * Return: None
 */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
    /* get flow id */
    queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
    queue->ring_id = DP_TX_GET_RING_ID(vdev);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "%s, pool_id:%d ring_id: %d",
            __func__, queue->desc_pool_id, queue->ring_id);
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_release() - Release the TSO segment and, on the last
 * segment, the shared num-segment descriptor, after unmapping the fragments
 * @soc: soc handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
        struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "%s %d TSO desc is NULL!",
                __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                "%s %d TSO common info is NULL!",
                __func__, __LINE__);
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *) tx_desc->tso_num_desc;

        if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
            tso_num_desc->num_seg.tso_cmn_num_seg--;
            qdf_nbuf_unmap_tso_segment(soc->osdev,
                    tx_desc->tso_desc, false);
        } else {
            tso_num_desc->num_seg.tso_cmn_num_seg--;
            qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
            qdf_nbuf_unmap_tso_segment(soc->osdev,
                    tx_desc->tso_desc, true);
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                    tx_desc->tso_num_desc);
            tx_desc->tso_num_desc = NULL;
        }

        dp_tx_tso_desc_free(soc,
                tx_desc->pool_id, tx_desc->tso_desc);
        tx_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_desc_release(struct dp_soc *soc,
        struct dp_tx_desc_s *tx_desc)
{
}
#endif
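
/*
 * Note on the release order above: tso_num_desc is shared by all segments
 * of one TSO jumbo frame, and tso_cmn_num_seg counts the segments still
 * outstanding. Each completion decrements the count and unmaps its own
 * segment; only the completion that drops the count to zero passes
 * is_last_seg=true to qdf_nbuf_unmap_tso_segment() and frees the shared
 * num-seg element.
 */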
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    if (tx_desc->frm_type == dp_tx_frm_tso)
        dp_tx_tso_desc_release(soc, tx_desc);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
        dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

    qdf_atomic_dec(&pdev->num_tx_outstanding);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&pdev->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
            hal_tx_comp_get_buffer_source(&tx_desc->comp))
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "Tx Completion Release desc %d status %d outstanding %d",
            tx_desc->id, comp_status,
            qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @meta_data: Metadata to be pushed into the frame pre-header
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        uint32_t *meta_data)
{
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *) meta_data;

    uint8_t htt_desc_size;

    /* Size rounded up to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

    if (vdev->mesh_vdev) {
        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        if (hdr == NULL) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    "Error in filling HTT metadata\n");
            return 0;
        }
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}
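
/*
 * Headroom note: the caller (dp_tx_prepare_desc_single()) pushes an
 * alignment pad before invoking this function, so that after the
 * 8-byte-aligned HTT metadata is pushed on top, the buffer start handed to
 * hardware is 8-byte aligned. The nbuf therefore needs
 * align_pad + htt_desc_size_aligned bytes of free headroom at this point.
 */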
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
        void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *) &tso_seg->tso_flags;
    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
            tso_seg->tso_flags.ip_len);

    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_dmaaddr_to_32s(
                tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                tso_seg->tso_frags[num_frag].length);
    }
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
        void *ext_desc)
{
}
#endif
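
/*
 * qdf_dmaaddr_to_32s() splits a (possibly 64-bit) DMA address into two
 * 32-bit words, since the extension descriptor carries each fragment
 * address as separate lo/hi fields. For example, paddr 0x123456780 would
 * yield lo = 0x23456780 and hi = 0x1.
 */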
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg() - Walk the list of allocated TSO segments
 * and free them
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg(struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                msdu_info->tx_queue.desc_pool_id,
                free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg() - Walk the list of allocated TSO num-segment
 * elements and free them
 * @soc: soc handle
 * @free_seg: list of tso num-segment elements
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg(struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tso_num_seg_free(soc,
                msdu_info->tx_queue.desc_pool_id,
                free_seg);
        free_seg = next_seg;
    }
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
                soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            struct qdf_tso_seg_elem_t *free_seg =
                tso_info->tso_seg_list;

            dp_tx_free_tso_seg(soc, free_seg, msdu_info);
            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
            msdu_info->tx_queue.desc_pool_id);

    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        /*
         * Allocation of the shared num-seg element failed;
         * free the per-segment elements allocated above.
         */
        struct qdf_tso_seg_elem_t *free_seg =
            tso_info->tso_seg_list;

        TSO_DEBUG(" %s: Failed alloc - Number of segs for a TSO packet",
                __func__);
        dp_tx_free_tso_seg(soc, free_seg, msdu_info);
        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
            msdu_info->num_seg);

    if (!(msdu_info->num_seg)) {
        dp_tx_free_tso_seg(soc, tso_info->tso_seg_list, msdu_info);
        dp_tx_free_tso_num_seg(soc, tso_info->tso_num_seg_list,
                msdu_info);
        return QDF_STATUS_E_INVAL;
    }

    tso_info->curr_seg = tso_info->tso_seg_list;

    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif
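
/*
 * Allocation flow above, in short: reserve one qdf_tso_seg_elem_t per
 * segment reported by qdf_nbuf_get_tso_num_seg(), plus a single shared
 * qdf_tso_num_seg_elem_t, then let qdf_nbuf_get_tso_info() DMA-map the
 * fragments and populate the elements. Any failure rolls back everything
 * allocated so far, so the caller only has to check the return status.
 */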
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
        return NULL;
    }

    if (msdu_info->exception_fw &&
            qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                &msdu_info->meta_data[0],
                sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&vdev->pdev->num_tx_exception);
        HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 1);
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;

        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                    seg_info->frags[i].paddr_lo,
                    seg_info->frags[i].paddr_hi,
                    seg_info->frags[i].len);
        }
        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                &cached_ext_desc[0]);
        break;

    default:
        break;
    }

    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
            msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}
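
/*
 * The extension descriptor is first assembled in cached_ext_desc[] on the
 * stack and only copied into DMA-able memory (msdu_ext_desc->vaddr) at the
 * end via hal_tx_ext_desc_sync(); presumably this keeps the field-by-field
 * writes in fast local memory rather than in the (potentially uncached)
 * descriptor memory.
 */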
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info (carries the metadata for the fw)
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
        struct dp_tx_msdu_info_s *msdu_info,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    uint8_t align_pad;
    uint8_t is_exception = 0;
    uint8_t htt_hdr_size;
    struct ether_header *eh;
    struct dp_tx_desc_s *tx_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (qdf_unlikely(!tx_desc)) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
        return NULL;
    }

    /* Flow control/Congestion Control counters */
    qdf_atomic_inc(&pdev->num_tx_outstanding);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = dp_tx_frm_std;
    tx_desc->tx_encap_type = (tx_exc_metadata ?
            tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->msdu_ext_desc = NULL;
    tx_desc->pkt_offset = 0;

    /*
     * For special modes (vdev_type == ocb or mesh), data frames should be
     * transmitted using varying transmit parameters (tx spec) which include
     * transmit rate, power, priority, channel, channel bandwidth, nss etc.
     * These are filled in HTT MSDU descriptor and sent in frame pre-header.
     * These frames are sent as exception packets to firmware.
     *
     * HW requirement is that metadata should always point to a
     * 8-byte aligned address. So we add alignment pad to start of buffer.
     * HTT Metadata should be ensured to be multiple of 8-bytes,
     * to get 8-byte aligned start address along with align_pad added
     *
     *  |-----------------------------|
     *  |                             |
     *  |-----------------------------| <-----Buffer Pointer Address given
     *  |                             |  ^    in HW descriptor (aligned)
     *  |       HTT Metadata          |  |
     *  |                             |  |
     *  |                             |  | Packet Offset given in descriptor
     *  |                             |  |
     *  |-----------------------------|  |
     *  |       Alignment Pad         |  v
     *  |-----------------------------| <----- Actual buffer start address
     *  |         SKB Data            |        (Unaligned)
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |-----------------------------|
     */
    if (qdf_unlikely((msdu_info->exception_fw)) ||
            (vdev->opmode == wlan_op_mode_ocb)) {
        align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
        if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    "qdf_nbuf_push_head failed\n");
            goto failure;
        }

        htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
                msdu_info->meta_data);
        if (htt_hdr_size == 0)
            goto failure;
        tx_desc->pkt_offset = align_pad + htt_hdr_size;
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        is_exception = 1;
    }

    if (qdf_unlikely(QDF_STATUS_SUCCESS !=
            qdf_nbuf_map(soc->osdev, nbuf,
                    QDF_DMA_TO_DEVICE))) {
        /* Handle failure */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "qdf_nbuf_map failed\n");
        DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
        goto failure;
    }

    if (qdf_unlikely(vdev->nawds_enabled)) {
        eh = (struct ether_header *) qdf_nbuf_data(nbuf);
        if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
            tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
            is_exception = 1;
        }
    }

#if !TQM_BYPASS_WAR
    if (is_exception || tx_exc_metadata)
#endif
    {
        /* Temporary WAR due to TQM VP issues */
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        qdf_atomic_inc(&pdev->num_tx_exception);
    }

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
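
/*
 * TQM_BYPASS_WAR note: with the workaround disabled (TQM_BYPASS_WAR == 0,
 * as defined above) the block following the #if runs only for exception
 * frames; building with TQM_BYPASS_WAR enabled compiles the condition out,
 * so every frame is flagged DP_TX_DESC_FLAG_TO_FW and routed via firmware.
 */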
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 * frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 * descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
        uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (!tx_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
        return NULL;
    }

    /* Flow control/Congestion Control counters */
    qdf_atomic_inc(&pdev->num_tx_outstanding);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = msdu_info->frm_type;
    tx_desc->tx_encap_type = vdev->tx_encap_type;
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->pkt_offset = 0;
    tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
    tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

    /* Handle scattered frames - TSO/SG/ME */
    /* Allocate and prepare an extension descriptor for scattered frames */
    msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
    if (!msdu_ext_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "%s Tx Extension Descriptor Alloc Fail\n",
                __func__);
        goto failure;
    }

#if TQM_BYPASS_WAR
    /* Temporary WAR due to TQM VP issues */
    tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
    qdf_atomic_inc(&pdev->num_tx_exception);
#endif
    if (qdf_unlikely(msdu_info->exception_fw))
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

    tx_desc->msdu_ext_desc = msdu_ext_desc;
    tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

    return tx_desc;
failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 * descriptor
 *
 * Return: nbuf on success, NULL on DMA mapping failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
    qdf_nbuf_t curr_nbuf = NULL;
    uint16_t total_len = 0;
    qdf_dma_addr_t paddr;
    int32_t i;
    int32_t mapped_buf_num = 0;

    struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
    qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

    DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

    /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
    if (qos_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
        qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

    for (curr_nbuf = nbuf, i = 0; curr_nbuf;
            curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {

        if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
                QDF_DMA_TO_DEVICE)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    "%s dma map error\n", __func__);
            DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
            mapped_buf_num = i;
            goto error;
        }

        paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
        seg_info->frags[i].paddr_lo = paddr;
        seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
        seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
        seg_info->frags[i].vaddr = (void *) curr_nbuf;
        total_len += qdf_nbuf_len(curr_nbuf);
    }

    seg_info->frag_cnt = i;
    seg_info->total_len = total_len;
    seg_info->next = NULL;

    sg_info->curr_seg = seg_info;

    msdu_info->frm_type = dp_tx_frm_raw;
    msdu_info->num_seg = 1;

    return nbuf;

error:
    i = 0;
    while (nbuf) {
        curr_nbuf = nbuf;
        if (i < mapped_buf_num) {
            qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
            i++;
        }
        nbuf = qdf_nbuf_next(nbuf);
        qdf_nbuf_free(curr_nbuf);
    }
    return NULL;
}
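
/*
 * Error handling above: mapped_buf_num records how many chained nbufs were
 * DMA-mapped before the failure, so the error loop unmaps exactly those
 * and then frees every buffer in the chain, mapped or not.
 */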
/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS on enqueue, QDF_STATUS_E_RESOURCES if the
 *         TCL ring is full
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
        struct dp_tx_desc_s *tx_desc, uint8_t tid,
        uint16_t fw_metadata, uint8_t ring_id,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    uint8_t type;
    uint16_t length;
    void *hal_tx_desc, *hal_tx_desc_cached;
    qdf_dma_addr_t dma_addr;
    uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

    enum cdp_sec_type sec_type = (tx_exc_metadata ?
            tx_exc_metadata->sec_type : vdev->sec_type);

    /* Return Buffer Manager ID */
    uint8_t bm_id = ring_id;
    void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;

    hal_tx_desc_cached = (void *) cached_desc;
    qdf_mem_zero_outline(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
        length = HAL_TX_EXT_DESC_WITH_META_DATA;
        type = HAL_TX_BUF_TYPE_EXT_DESC;
        dma_addr = tx_desc->msdu_ext_desc->paddr;
    } else {
        length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
        type = HAL_TX_BUF_TYPE_BUFFER;
        dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
    }

    hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
    hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
            dma_addr, bm_id, tx_desc->id, type);
    hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
    hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
    hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
    hal_tx_desc_set_lmac_id(hal_tx_desc_cached,
            HAL_TX_DESC_DEFAULT_LMAC_ID);
    hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
            vdev->dscp_tid_map_id);
    hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
            sec_type_map[sec_type]);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
            "%s length:%d, type = %d, dma_addr %llx, offset %d desc id %u",
            __func__, length, type, (uint64_t)dma_addr,
            tx_desc->pkt_offset, tx_desc->id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

    hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
            vdev->hal_desc_addr_search_flags);

    /* Verify checksum offload configuration */
    if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
            ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
             || qdf_nbuf_is_tso(tx_desc->nbuf))) {
        hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
        hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
    }

    if (tid != HTT_TX_EXT_TID_INVALID)
        hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

    if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
        hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);

    /* Sync cached descriptor with HW */
    hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);

    if (!hal_tx_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                "%s TCL ring full ring_id:%d\n", __func__, ring_id);
        DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
        DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
        return QDF_STATUS_E_RESOURCES;
    }

    tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

    hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
    DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

    /*
     * If one packet is enqueued in HW, PM usage count needs to be
     * incremented by one to prevent future runtime suspend. This
     * should be tied with the success of enqueuing. It will be
     * decremented after the packet has been sent.
     */
    hif_pm_runtime_get_noresume(soc->hif_handle);

    return QDF_STATUS_SUCCESS;
}
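
/*
 * Note the same build-then-sync pattern as the extension descriptor: the
 * TCL descriptor is staged in cached_desc[] on the stack and copied into
 * the ring slot returned by hal_srng_src_get_next() in one
 * hal_tx_desc_sync() call. The runtime-PM get is deliberately placed after
 * the ring-full check, so the usage count is only taken for frames that
 * were actually enqueued.
 */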
/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool (true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    struct ether_header *eh = NULL;
    uint16_t ether_type;
    qdf_llc_t *llcHdr;
    qdf_nbuf_t nbuf_clone = NULL;
    qdf_dot3_qosframe_t *qos_wh = NULL;

    /* for mesh packets don't do any classification */
    if (qdf_unlikely(vdev->mesh_vdev))
        return false;

    if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        eh = (struct ether_header *) qdf_nbuf_data(nbuf);
        ether_type = eh->ether_type;
        llcHdr = (qdf_llc_t *)(nbuf->data +
                sizeof(struct ether_header));
    } else {
        qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

        if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
            if (qdf_unlikely(
                    qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
                    qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
                /* 4-address (WDS) QoS frame */
                ether_type = *(uint16_t *)(nbuf->data
                        + QDF_IEEE80211_4ADDR_HDR_LEN
                        + sizeof(qdf_llc_t)
                        - sizeof(ether_type));
                llcHdr = (qdf_llc_t *)(nbuf->data +
                        QDF_IEEE80211_4ADDR_HDR_LEN);
            } else {
                /* 3-address QoS frame */
                ether_type = *(uint16_t *)(nbuf->data
                        + QDF_IEEE80211_3ADDR_HDR_LEN
                        + sizeof(qdf_llc_t)
                        - sizeof(ether_type));
                llcHdr = (qdf_llc_t *)(nbuf->data +
                        QDF_IEEE80211_3ADDR_HDR_LEN);
            }

            if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
                    && (ether_type ==
                        qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
                DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
                return true;
            }
        }

        return false;
    }

    if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
        ether_type = *(uint16_t *)(nbuf->data + 2*ETHER_ADDR_LEN +
                sizeof(*llcHdr));
        nbuf_clone = qdf_nbuf_clone(nbuf);
        /* The clone may fail under memory pressure; skip the pull then */
        if (nbuf_clone) {
            qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

            if (ether_type == htons(ETHERTYPE_8021Q)) {
                qdf_nbuf_pull_head(nbuf_clone,
                        sizeof(qdf_net_vlanhdr_t));
            }
        }
    } else {
        if (ether_type == htons(ETHERTYPE_8021Q)) {
            nbuf_clone = qdf_nbuf_clone(nbuf);
            if (nbuf_clone)
                qdf_nbuf_pull_head(nbuf_clone,
                        sizeof(qdf_net_vlanhdr_t));
        }
    }

    if (qdf_unlikely(nbuf_clone))
        nbuf = nbuf_clone;

    if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
            || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
            || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
            || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
            || (qdf_nbuf_is_ipv4_pkt(nbuf)
                && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
            || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
                qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
        if (qdf_unlikely(nbuf_clone != NULL))
            qdf_nbuf_free(nbuf_clone);
        return true;
    }

    if (qdf_unlikely(nbuf_clone != NULL))
        qdf_nbuf_free(nbuf_clone);

    return false;
}
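
/*
 * The clone in dp_cce_classify() exists because qdf_nbuf_pull_head() would
 * otherwise strip the LLC/VLAN headers from the frame about to be
 * transmitted; the qdf_nbuf_is_ipv4_*() parsers expect a plain Ethernet
 * header, so the inspection runs on the adjusted clone, which is freed on
 * every exit path.
 */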
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the TID is filled
 *
 * Extract the DSCP or PCP information from the frame and map it into a TID
 * value. Software based TID classification is required when more than 2
 * DSCP-TID mapping tables are needed; hardware supports only 2 DSCP-TID
 * mapping tables.
 *
 * Return: void
 */
static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_msdu_info_s *msdu_info)
{
    uint8_t tos = 0, dscp_tid_override = 0;
    uint8_t *hdr_ptr, *L3datap;
    uint8_t is_mcast = 0;
    struct ether_header *eh = NULL;
    qdf_ethervlan_header_t *evh = NULL;
    uint16_t ether_type;
    qdf_llc_t *llcHdr;
    struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

    /* for mesh packets don't do any classification */
    if (qdf_unlikely(vdev->mesh_vdev))
        return;

    if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        eh = (struct ether_header *) nbuf->data;
        hdr_ptr = eh->ether_dhost;
        L3datap = hdr_ptr + sizeof(struct ether_header);
    } else {
        qdf_dot3_qosframe_t *qos_wh =
            (qdf_dot3_qosframe_t *) nbuf->data;
        msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
            qos_wh->i_qos[0] & DP_QOS_TID : 0;
        return;
    }

    is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
    ether_type = eh->ether_type;

    llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(struct ether_header));
    /*
     * Check if packet is dot3 or eth2 type.
     */
    if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
        ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN +
                sizeof(*llcHdr));

        if (ether_type == htons(ETHERTYPE_8021Q)) {
            L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
                sizeof(*llcHdr);
            ether_type = (uint16_t)*(nbuf->data + 2*ETHER_ADDR_LEN
                    + sizeof(*llcHdr) +
                    sizeof(qdf_net_vlanhdr_t));
        } else {
            L3datap = hdr_ptr + sizeof(struct ether_header) +
                sizeof(*llcHdr);
        }
    } else {
        if (ether_type == htons(ETHERTYPE_8021Q)) {
            evh = (qdf_ethervlan_header_t *) eh;
            ether_type = evh->ether_type;
            L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
        }
    }

    /*
     * Find priority from IP TOS DSCP field
     */
    if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
        qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
        if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
            /* Only for unicast frames */
            if (!is_mcast) {
                /* send it on VO queue */
                msdu_info->tid = DP_VO_TID;
            }
        } else {
            /*
             * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
             * from TOS byte.
             */
            tos = ip->ip_tos;
            dscp_tid_override = 1;
        }
    } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
        /*
         * TODO: use flowlabel
         * IGMP/MLD cases to be handled in phase 2
         */
        unsigned long ver_pri_flowlabel;
        unsigned long pri;

        ver_pri_flowlabel = *(unsigned long *) L3datap;
        pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
            DP_IPV6_PRIORITY_SHIFT;
        tos = pri;
        dscp_tid_override = 1;
    } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
        msdu_info->tid = DP_VO_TID;
    else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
        /* Only for unicast frames */
        if (!is_mcast) {
            /* send ucast arp on VO queue */
            msdu_info->tid = DP_VO_TID;
        }
    }

    /*
     * Assign all MCAST packets to BE
     */
    if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        if (is_mcast) {
            tos = 0;
            dscp_tid_override = 1;
        }
    }

    if (dscp_tid_override == 1) {
        tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
        msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
    }
}
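
/*
 * Worked example for the DSCP override path, assuming the usual macro
 * values DP_IP_DSCP_SHIFT == 2 and DP_IP_DSCP_MASK == 0x3f: an IPv4 TOS
 * byte of 0xb8 (Expedited Forwarding) becomes (0xb8 >> 2) & 0x3f == 46,
 * which then indexes the per-pdev dscp_tid_map[] table selected by the
 * vdev's dscp_tid_map_id.
 */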
#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
    if (tx_desc->vdev) {
        if (tx_desc->vdev->is_tdls_frame)
            tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
        tx_desc->vdev->is_tdls_frame = false;
    }
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non-std tx packet buffer
 * @tx_desc: TX descriptor
 * @vdev: datapath vdev handle
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
        struct dp_vdev *vdev)
{
    struct hal_tx_completion_status ts = {0};
    qdf_nbuf_t nbuf = tx_desc->nbuf;

    hal_tx_comp_get_status(&tx_desc->comp, &ts);
    if (vdev->tx_non_std_data_callback.func) {
        qdf_nbuf_set_next(tx_desc->nbuf, NULL);
        vdev->tx_non_std_data_callback.func(
                vdev->tx_non_std_data_callback.ctxt,
                nbuf, ts.status);
        return;
    }
}
#else
/*
 * Empty stubs so callers need no #ifdef of their own
 * (dp_tx_update_tdls_flags() is called unconditionally below; the stubs
 * are assumed to mirror the upstream no-TDLS build).
 */
static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
        struct dp_vdev *vdev)
{
}
#endif
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info (TID, fw metadata and the Tx queue for this frame)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;
    struct dp_tx_desc_s *tx_desc;
    QDF_STATUS status;
    struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
    void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;
    uint16_t htt_tcl_metadata = 0;
    uint8_t tid = msdu_info->tid;

    HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);

    /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
    tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
            msdu_info, tx_exc_metadata);
    if (!tx_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "%s Tx_desc prepare Fail vdev %pK queue %d\n",
                __func__, vdev, tx_q->desc_pool_id);
        return nbuf;
    }

    if (qdf_unlikely(soc->cce_disable)) {
        if (dp_cce_classify(vdev, nbuf) == true) {
            DP_STATS_INC(vdev, tx_i.cce_classified, 1);
            tid = DP_VO_TID;
            tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        }
    }

    dp_tx_update_tdls_flags(tx_desc);

    if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                "%s %d : HAL RING Access Failed -- %pK\n",
                __func__, __LINE__, hal_srng);
        DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
        dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
        goto fail_return;
    }

    if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
        htt_tcl_metadata = vdev->htt_tcl_metadata;
        HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
    } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
        HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
                HTT_TCL_METADATA_TYPE_PEER_BASED);
        HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
                peer_id);
    } else
        htt_tcl_metadata = vdev->htt_tcl_metadata;

    /* Enqueue the Tx MSDU descriptor to HW for transmit */
    status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
            htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);

    if (status != QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
                __func__, tx_desc, tx_q->ring_id);
        dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
        goto fail_return;
    }

    nbuf = NULL;

fail_return:
    if (hif_pm_runtime_get(soc->hif_handle) == 0) {
        hal_srng_access_end(soc->hal_soc, hal_srng);
        hif_pm_runtime_put(soc->hif_handle);
    } else {
        hal_srng_access_end_reap(soc->hal_soc, hal_srng);
    }

    return nbuf;
}
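
/*
 * About the fail_return epilogue: when the runtime-PM get succeeds
 * (hif_pm_runtime_get() returns 0) the device is awake, so
 * hal_srng_access_end() can write the ring head pointer to hardware and
 * the temporary PM reference is dropped again; otherwise only
 * hal_srng_access_end_reap() runs, deferring the hardware update. This
 * reading follows the function names; the exact contract lives in the
 * hif/hal headers.
 */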
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
static noinline
#else
static
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_msdu_info_s *msdu_info)
{
    uint8_t i;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;
    struct dp_tx_desc_s *tx_desc;
    bool is_cce_classified = false;
    QDF_STATUS status;
    struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
    void *hal_srng = soc->tcl_data_ring[tx_q->ring_id].hal_srng;

    if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                "%s %d : HAL RING Access Failed -- %pK\n",
                __func__, __LINE__, hal_srng);
        DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
        return nbuf;
    }

    if (qdf_unlikely(soc->cce_disable)) {
        is_cce_classified = dp_cce_classify(vdev, nbuf);
        if (is_cce_classified) {
            DP_STATS_INC(vdev, tx_i.cce_classified, 1);
            msdu_info->tid = DP_VO_TID;
        }
    }

    if (msdu_info->frm_type == dp_tx_frm_me)
        nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

    i = 0;
    /*
     * For each segment (maps to 1 MSDU), prepare software and hardware
     * descriptors using information in msdu_info
     */
    while (i < msdu_info->num_seg) {
        /*
         * Setup Tx descriptor for an MSDU, and MSDU extension
         * descriptor
         */
        tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
                tx_q->desc_pool_id);

        if (!tx_desc) {
            if (msdu_info->frm_type == dp_tx_frm_me) {
                dp_tx_me_free_buf(pdev,
                        (void *)(msdu_info->u.sg_info
                            .curr_seg->frags[0].vaddr));
            }
            goto done;
        }

        if (msdu_info->frm_type == dp_tx_frm_me) {
            tx_desc->me_buffer =
                msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
            tx_desc->flags |= DP_TX_DESC_FLAG_ME;
        }

        if (is_cce_classified)
            tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

        /*
         * Enqueue the Tx MSDU descriptor to HW for transmit
         */
        status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
                vdev->htt_tcl_metadata, tx_q->ring_id, NULL);

        if (status != QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                    "%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
                    __func__, tx_desc, tx_q->ring_id);

            if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
                dp_tx_me_free_buf(pdev, tx_desc->me_buffer);

            dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
            goto done;
        }

        /*
         * TODO
         * if tso_info structure can be modified to have curr_seg
         * as first element, following 2 blocks of code (for TSO and SG)
         * can be combined into 1
         */

        /*
         * For frames with multiple segments (TSO, ME), jump to next
         * segment.
         */
        if (msdu_info->frm_type == dp_tx_frm_tso) {
            if (msdu_info->u.tso_info.curr_seg->next) {
                msdu_info->u.tso_info.curr_seg =
                    msdu_info->u.tso_info.curr_seg->next;

                /*
                 * If this is a jumbo nbuf, then increment the number of
                 * nbuf users for each additional segment of the msdu.
                 * This will ensure that the skb is freed only after
                 * receiving tx completion for all segments of an nbuf
                 */
                qdf_nbuf_inc_users(nbuf);

                /* Check with MCL if this is needed */
                /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
            }
        }

        /*
         * For Multicast-Unicast converted packets,
         * each converted frame (for a client) is represented as
         * 1 segment
         */
        if ((msdu_info->frm_type == dp_tx_frm_sg) ||
                (msdu_info->frm_type == dp_tx_frm_me)) {
            if (msdu_info->u.sg_info.curr_seg->next) {
                msdu_info->u.sg_info.curr_seg =
                    msdu_info->u.sg_info.curr_seg->next;
                nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
            }
        }

        i++;
    }

    nbuf = NULL;

done:
    if (hif_pm_runtime_get(soc->hif_handle) == 0) {
        hal_srng_access_end(soc->hal_soc, hal_srng);
        hif_pm_runtime_put(soc->hif_handle);
    } else {
        hal_srng_access_end_reap(soc->hal_soc, hal_srng);
    }

    return nbuf;
}
/**
 * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
 *                      for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
				QDF_DMA_TO_DEVICE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "dma map error\n");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
		qdf_nbuf_free(nbuf);
		return NULL;
	}
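
	/*
	 * Segment entry 0 describes the linear (head) portion of the nbuf;
	 * the paged fragments are filled in from index 1 onward below. The
	 * nbuf pointer itself is stashed in frags[0].vaddr so the original
	 * buffer can be recovered later.
	 */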
	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "frag dma map error\n");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}

#ifdef MESH_MODE_SUPPORT

/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_set(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t), 0);

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_pull_head failed\n");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		msdu_info->tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
	else
		msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s , Meta hdr %0x %0x %0x %0x %0x %0x"
		  " tid %d to_fw %d\n",
		  __func__, msdu_info->meta_data[0],
		  msdu_info->meta_data[1],
		  msdu_info->meta_data[2],
		  msdu_info->meta_data[3],
		  msdu_info->meta_data[4],
		  msdu_info->meta_data[5],
		  msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
#endif

#ifdef DP_FEATURE_NAWDS_TX
/**
 * dp_tx_prepare_nawds() - Transmit NAWDS frames
 * @vdev: dp_vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be used for this Tx frame
 *
 * Return: NULL on success,
 *         nbuf on failure
 */
static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_ast_entry *ast_entry = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	uint16_t peer_id = HTT_INVALID_PEER;

	struct dp_peer *sa_peer = NULL;
	qdf_nbuf_t nbuf_copy;

	qdf_spin_lock_bh(&(soc->ast_lock));
	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
				(peer->nawds_enabled)) {
			if (sa_peer == peer) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  " %s: broadcast multicast packet",
					  __func__);
				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
				continue;
			}

			nbuf_copy = qdf_nbuf_copy(nbuf);
			if (!nbuf_copy) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "nbuf copy failed");
				/* avoid passing a NULL nbuf down the
				 * send path */
				break;
			}

			peer_id = peer->peer_ids[0];
			nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
					msdu_info, peer_id, NULL);

			if (nbuf_copy != NULL) {
				qdf_nbuf_free(nbuf_copy);
				continue;
			}

			DP_STATS_INC_PKT(peer, tx.nawds_mcast,
					1, qdf_nbuf_len(nbuf));
		}
	}

	if (peer_id == HTT_INVALID_PEER)
		return nbuf;

	return NULL;
}
#endif

/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
 *
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
	    tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
	    tx_exc->sec_type > cdp_num_sec_types) {
		return false;
	}

	return true;
}

/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct ether_header *eh = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	struct dp_tx_msdu_info_s msdu_info;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	msdu_info.tid = tx_exc_metadata->tid;

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s , skb %pM",
		  __func__, nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");
		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}

	/* Mcast enhancement */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW\n");
		}
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare a direct-buffer type TCL descriptor and enqueue it to the
	 * TCL SRNG. There is no need to set up an MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
			tx_exc_metadata->peer_id, tx_exc_metadata);

	return nbuf;

fail:
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "pkt send failed");
	return nbuf;
}

/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (nbuf_mesh == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_unshare failed\n");
		return nbuf;
	}
	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;
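
	/*
	 * When the meta header carries updated rate/power info and the frame
	 * will be encrypted, a clone tagged with CB_FTYPE_MESH_TX_INFO is
	 * sent through the FW-exception path below, while the original frame
	 * continues down the regular transmit path.
	 */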
	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
			!no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (nbuf_clone == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_clone failed\n");
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(vap_dev, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(vap_dev, nbuf);
	if ((nbuf == NULL) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	return nbuf;
}

#else

qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
{
	return dp_tx_send(vap_dev, nbuf);
}

#endif

/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
{
	struct ether_header *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s seg_info;
	struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
	uint16_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf_mesh = NULL;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);
	qdf_mem_set(&seg_info, sizeof(seg_info), 0x0);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s , skb %pM",
		  __func__, nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
				&msdu_info);
		if (nbuf_mesh == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "Extracting mesh metadata failed\n");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 * Table 1 - Default DSCP-TID mapping table
	 * Table 2 - DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	if (vdev->dscp_tid_map_id > 1)
		dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/* Reset the control block */
	qdf_nbuf_reset_ctxt(nbuf);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into the MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s TSO frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC(vdev, tx_i.tso.dropped_host, 1);
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s non-TSO SG frame %pK\n", __func__, vdev);
		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "%s Mcast frm for ME %pK\n", __func__, vdev);
			DP_STATS_INC_PKT(vdev,
					tx_i.mcast_en.mcast_pkt, 1,
					qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (nbuf == NULL)
			return NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Raw frame %pK\n", __func__, vdev);

		goto send_multiple;
	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare a direct-buffer type TCL descriptor and enqueue it to the
	 * TCL SRNG. There is no need to set up an MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	return nbuf;
}

/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = NULL;
	struct ether_header *eh = (struct ether_header *)qdf_nbuf_data(nbuf);
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	struct ether_header *eth_hdr =
		(struct ether_header *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh =
		(struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	soc = vdev->pdev->soc;

	qdf_assert(vdev);

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx reinject path\n", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	qdf_spin_lock_bh(&(soc->ast_lock));

	ast_entry = dp_peer_ast_hash_find(soc, (uint8_t *)(eh->ether_shost));

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 *   send ucast on that peer only
			 */
			((peer->bss_peer && num_peers_3addr && is_mcast) ||
			 (peer->wds_enabled &&
			  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			((peer->bss_peer &&
			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
				 peer->nawds_enabled)) {
#endif
				peer_id = DP_INVALID_PEER;

				if (peer->nawds_enabled) {
					peer_id = peer->peer_ids[0];
					if (sa_peer == peer) {
						QDF_TRACE(
							QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_DEBUG,
							" %s: multicast packet",
							__func__);
						DP_STATS_INC(peer,
							tx.nawds_mcast_drop, 1);
						continue;
					}
				}

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
						nbuf_copy,
						&msdu_info,
						peer_id,
						NULL);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				} else {
					if (peer_id != DP_INVALID_PEER)
						DP_STATS_INC_PKT(peer,
							tx.nawds_mcast,
							1, qdf_nbuf_len(nbuf));
				}
			}
		}
	}
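
	/*
	 * For a NAWDS-enabled vdev, the original frame itself is re-enqueued
	 * once more with DP_INVALID_PEER (counted under tx_i.nawds_mcast);
	 * otherwise every needed copy has been sent above and the original
	 * frame can be freed.
	 */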
	if (vdev->nawds_enabled) {
		peer_id = DP_INVALID_PEER;

		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				1, qdf_nbuf_len(nbuf));

		nbuf = dp_tx_send_msdu_single(vdev,
				nbuf,
				&msdu_info,
				peer_id, NULL);

		if (nbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				FL("pkt send failed"));
			qdf_nbuf_free(nbuf);
		}
	} else {
		qdf_nbuf_free(nbuf);
	}

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev = tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path\n",
		  __func__);

	qdf_assert(pdev);

	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @first_msdu: first msdu
 * @last_msdu: last msdu
 * @netbuf: Buffer pointer for free
 *
 * This function is used to indicate whether the buffer needs to be
 * sent to the stack for freeing or not
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
		uint8_t last_msdu, qdf_nbuf_t netbuf)
{
	struct tx_capture_hdr *ppdu_hdr;
	struct dp_peer *peer = NULL;

	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode))
		return QDF_STATUS_E_NOSUPPORT;

	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
			dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}
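
	/*
	 * In M_COPY mode only one MSDU per (ppdu_id, peer_id) pair is
	 * indicated to the stack; repeats for the same PPDU and peer
	 * are filtered out here.
	 */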
	if (pdev->mcopy_mode) {
		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
			(pdev->m_copy_id.tx_peer_id == peer_id)) {
			return QDF_STATUS_E_INVAL;
		}

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
		     IEEE80211_ADDR_LEN);
	ppdu_hdr->ppdu_id = ppdu_id;
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
		     IEEE80211_ADDR_LEN);
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id,
		qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			netbuf, peer_id,
			WDI_NO_VAL, pdev->pdev_id);
}
#else
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, uint8_t first_msdu,
		uint8_t last_msdu, qdf_nbuf_t netbuf)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
		uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
{
}
#endif

/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
		struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(desc, vdev);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* If this is the last remaining user of the nbuf
			 * (no other TSO segment outstanding), unmap it
			 * before freeing.
			 */
			if (qdf_nbuf_get_users(nbuf) == 1)
				__qdf_nbuf_unmap_single(soc->osdev,
						nbuf,
						QDF_DMA_TO_DEVICE);

			qdf_nbuf_free(nbuf);
			return;
		}
	}

	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);

	if (qdf_likely(!vdev->mesh_vdev))
		qdf_nbuf_free(nbuf);
	else {
		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
		} else {
			vdev->osif_tx_free_ext((nbuf));
		}
	}
}

/**
 * dp_tx_mec_handler() - Tx MEC Notify Handler
 * @vdev: pointer to dp dev handler
 * @status: Tx completion status from HTT descriptor
 *
 * Handles MEC notify event sent from fw to Host
 *
 * Return: none
 */
#ifdef FEATURE_WDS
void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	struct dp_soc *soc;
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	struct dp_peer *peer;
	uint8_t mac_addr[DP_MAC_ADDR_LEN], i;

	if (!vdev->wds_enabled)
		return;

	soc = vdev->pdev->soc;
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	peer = TAILQ_FIRST(&vdev->peer_list);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("peer is NULL"));
		return;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx MEC Handler\n",
		  __func__);
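
	/*
	 * The HTT MEC-notify status carries the source MAC address in
	 * reverse byte order starting at byte offset (DP_MAC_ADDR_LEN - 2);
	 * rebuild it in normal order here.
	 */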
	for (i = 0; i < DP_MAC_ADDR_LEN; i++)
		mac_addr[(DP_MAC_ADDR_LEN - 1) - i] =
			status[(DP_MAC_ADDR_LEN - 2) + i];

	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, DP_MAC_ADDR_LEN))
		dp_peer_add_ast(soc,
				peer,
				mac_addr,
				CDP_TXRX_AST_TYPE_MEC,
				flags);
}
#else
static void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
}
#endif

/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	uint32_t *htt_status_word = (uint32_t *) status;

	qdf_assert(tx_desc->pdev);

	pdev = tx_desc->pdev;
	vdev = tx_desc->vdev;
	soc = pdev->soc;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_status_word[0]);

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Invalid HTT tx_status %d\n",
			  __func__, tx_status);
		break;
	}
}

#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "netbuf %pK offset %d\n",
				  netbuf, tx_desc->pkt_offset);
			return;
		}
	}

	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "netbuf %pK offset %d\n", netbuf,
			  sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->channel = tx_desc->pdev->operating_channel;
}

#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
		struct hal_tx_completion_status *ts)
{
}
#endif

/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 * @peer: Handle to DP peer
 * @ts: pointer to HAL Tx completion stats
 * @length: MSDU length
 *
 * Return: None
 */
static void dp_tx_update_peer_stats(struct dp_peer *peer,
		struct hal_tx_completion_status *ts, uint32_t length)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint8_t mcs, pkt_type;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM)
		return;

	if (peer->bss_peer) {
		DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
		DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
	} else {
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
			DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
		}
	}

	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC(peer, tx.dropped.fw_rem, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
			(ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON1));

	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON2));

	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
			(ts->status == HAL_TX_TQM_RR_FW_REASON3));

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED)
		return;

	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);

	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);

	if (!(soc->process_tx_status))
		return;
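
	/*
	 * Per-rate stats: bucket the reported MCS into the counter for the
	 * corresponding preamble type. MCS values at or above the maximum
	 * valid for that preamble accumulate in the last (MAX_MCS - 1)
	 * bucket as invalid/overflow.
	 */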
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
			((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
			((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));

	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

	if (soc->cdp_soc.ol_ops->update_dp_stats) {
		soc->cdp_soc.ol_ops->update_dp_stats(pdev->osif_pdev,
				&peer->stats, ts->peer_id,
				UPDATE_PEER_STATS);
	}
}

/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @length: packet length
 *
 * Return: none
 */
static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
		uint32_t length)
{
	struct hal_tx_completion_status ts;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	struct dp_peer *peer = NULL;
	struct ether_header *eh =
		(struct ether_header *)qdf_nbuf_data(tx_desc->nbuf);

	hal_tx_comp_get_status(&tx_desc->comp, &ts);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "--------------------\n"
		  "Tx Completion Stats:\n"
		  "--------------------\n"
		  "ack_frame_rssi = %d\n"
		  "first_msdu = %d\n"
		  "last_msdu = %d\n"
		  "msdu_part_of_amsdu = %d\n"
		  "rate_stats valid = %d\n"
		  "bw = %d\n"
		  "pkt_type = %d\n"
		  "stbc = %d\n"
		  "ldpc = %d\n"
		  "sgi = %d\n"
		  "mcs = %d\n"
		  "ofdma = %d\n"
		  "tones_in_ru = %d\n"
		  "tsf = %d\n"
		  "ppdu_id = %d\n"
		  "transmit_cnt = %d\n"
		  "tid = %d\n"
		  "peer_id = %d\n",
		  ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
		  ts.msdu_part_of_amsdu, ts.valid, ts.bw,
		  ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
		  ts.mcs, ts.ofdma, ts.tones_in_ru, ts.tsf,
		  ts.ppdu_id, ts.transmit_cnt, ts.tid,
		  ts.peer_id);

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "invalid vdev");
		goto out;
	}

	soc = vdev->pdev->soc;

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
			(ts.status == HAL_TX_TQM_RR_REM_CMD_REM));

	/* Update per-packet stats */
	if (qdf_unlikely(vdev->mesh_vdev) &&
			!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, &ts);

	/* Update peer level stats */
	peer = dp_peer_find_by_id(soc, ts.peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "invalid peer");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	if (qdf_likely(peer->vdev->tx_encap_type ==
				htt_cmn_pkt_type_ethernet)) {
		if (peer->bss_peer && IEEE80211_IS_BROADCAST(eh->ether_dhost))
			DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
	}

	dp_tx_update_peer_stats(peer, &ts, length);

out:
	return;
}

/**
 * dp_tx_comp_process_desc() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void dp_tx_comp_process_desc(struct dp_soc *soc,
		struct dp_tx_desc_s *comp_head)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts = {0};
	uint32_t length;
	struct dp_peer *peer;

	DP_HIST_INIT();
	desc = comp_head;

	while (desc) {
		hal_tx_comp_get_status(&desc->comp, &ts);
		peer = dp_peer_find_by_id(soc, ts.peer_id);
		length = qdf_nbuf_len(desc->nbuf);

		dp_tx_comp_process_tx_status(desc, length);

		/*
		 * Currently m_copy/tx_capture is not supported for scatter
		 * gather packets
		 */
		if (!(desc->msdu_ext_desc) &&
		    (dp_get_completion_indication_for_stack(soc,
					desc->pdev, ts.peer_id, ts.ppdu_id,
					ts.first_msdu, ts.last_msdu,
					desc->nbuf) == QDF_STATUS_SUCCESS)) {
			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
					QDF_DMA_TO_DEVICE);
			dp_send_completion_to_stack(soc, desc->pdev,
					ts.peer_id, ts.ppdu_id, desc->nbuf);
		} else {
			dp_tx_comp_free_buf(soc, desc);
		}

		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);

		next = desc->next;
		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}

	DP_TX_HIST_STATS_PER_PDEV();
}

/**
 * dp_tx_comp_handler() - Tx completion handler
 * @soc: core txrx main context
 * @hal_srng: completion ring handle
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: number of completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed;
	uint32_t count;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK\n",
			  __func__, __LINE__, hal_srng);
		return 0;
	}

	num_processed = 0;
	count = 0;

	/* Find head descriptor from completion ring */
	while (qdf_likely(tx_comp_hal_desc =
			hal_srng_dst_get_next(soc->hal_soc, hal_srng))) {

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert */
		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
				(buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_FATAL,
				  "Tx comp release_src != TQM | FW");
			qdf_assert_always(0);
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

		/* Pool ID is out of limit. Error */
		if (pool_id > wlan_cfg_get_num_tx_desc_pool(
					soc->wlan_cfg_ctx)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_FATAL,
				  "Tx Comp pool id %d not valid",
				  pool_id);
			qdf_assert_always(0);
		}

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
					htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
					htt_tx_status);
		} else {
			/* Pool id is not matching. Error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Tx Comp pool id %d not matched %d",
					  pool_id, tx_desc->pool_id);
				qdf_assert_always(0);
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
				!(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Txdesc invalid, flgs = %x,id = %d",
					  tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					&tx_desc->comp, 1);
		}
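
		/*
		 * Quota accounting: num_processed advances only once every
		 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) reaped descriptors, so a
		 * single quota unit covers a small batch of completions.
		 */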
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/* Decrement PM usage count if the packet has been sent. */
		hif_pm_runtime_put(soc->hif_handle);

		/*
		 * If the processed packet count reaches the given quota,
		 * stop processing
		 */
		if ((num_processed >= quota))
			break;

		count++;
	}

	hal_srng_access_end(soc->hal_soc, hal_srng);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc(soc, head_desc);

	return num_processed;
}

#ifdef CONVERGED_TDLS_ENABLE
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @vdev_handle: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;

	return dp_tx_send(vdev_handle, msdu_list);
}
#endif

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
			HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
			vdev->vdev_id);

	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
			DP_SW2HW_MACID(vdev->pdev->pdev_id));

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta &&
				(!vdev->wds_enabled || vdev->proxysta_vdev)))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
}

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_attach() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_exception);
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}

static void dp_tx_desc_flush(struct dp_pdev *pdev)
{
}
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
		int num_desc)
{
	uint8_t i;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx Desc Pool alloc %d failed %pK\n",
				  __func__, i, soc);
			return ENOMEM;
		}
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s Tx Desc Pool Free failed\n", __func__);
		}
	}
}

/**
 * dp_tx_desc_flush() - release resources associated
 *                      to tx_desc
 * @pdev: physical device instance
 *
 * This function will free all outstanding Tx buffers,
 * including ME buffers for which either the free during
 * completion didn't happen or the completion was never
 * received.
 */
static void dp_tx_desc_flush(struct dp_pdev *pdev)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
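
	/*
	 * Walk every descriptor in every pool; j is decomposed into the
	 * page/offset fields the same way a hardware Tx descriptor ID is,
	 * so dp_tx_desc_find() can locate the software descriptor.
	 */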
	for (i = 0; i < num_pool; i++) {
		for (j = 0; j < num_desc; j++) {
			tx_desc_pool = &((soc)->tx_desc[(i)]);
			if (tx_desc_pool &&
				tx_desc_pool->desc_pages.cacheable_pages) {
				tx_desc = dp_tx_desc_find(soc, i,
					(j & DP_TX_DESC_ID_PAGE_MASK) >>
					DP_TX_DESC_ID_PAGE_OS,
					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
					DP_TX_DESC_ID_OFFSET_OS);

				if (tx_desc && (tx_desc->pdev == pdev) &&
					(tx_desc->flags &
					 DP_TX_DESC_FLAG_ALLOCATED)) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
	dp_tx_desc_flush(pdev);

	dp_tx_me_exit(pdev);
	return QDF_STATUS_SUCCESS;
}

  2684. /**
  2685. * dp_tx_soc_detach() - detach soc from dp tx
  2686. * @soc: core txrx main context
  2687. *
  2688. * This function will detach dp tx into main device context
  2689. * will free dp tx resource and initialize resources
  2690. *
  2691. * Return: QDF_STATUS_SUCCESS: success
  2692. * QDF_STATUS_E_RESOURCES: Error return
  2693. */
  2694. QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
  2695. {
  2696. uint8_t num_pool;
  2697. uint16_t num_desc;
  2698. uint16_t num_ext_desc;
  2699. uint8_t i;
  2700. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  2701. num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  2702. num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
  2703. dp_tx_flow_control_deinit(soc);
  2704. dp_tx_delete_static_pools(soc, num_pool);
  2705. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2706. "%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
  2707. __func__, num_pool, num_desc);
  2708. for (i = 0; i < num_pool; i++) {
  2709. if (dp_tx_ext_desc_pool_free(soc, i)) {
  2710. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2711. "%s Tx Ext Desc Pool Free failed\n",
  2712. __func__);
  2713. return QDF_STATUS_E_RESOURCES;
  2714. }
  2715. }
  2716. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2717. "%s MSDU Ext Desc Pool %d Free descs = %d\n",
  2718. __func__, num_pool, num_ext_desc);
  2719. for (i = 0; i < num_pool; i++) {
  2720. dp_tx_tso_desc_pool_free(soc, i);
  2721. }
  2722. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2723. "%s TSO Desc Pool %d Free descs = %d\n",
  2724. __func__, num_pool, num_desc);
  2725. for (i = 0; i < num_pool; i++)
  2726. dp_tx_tso_num_seg_pool_free(soc, i);
  2727. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2728. "%s TSO Num of seg Desc Pool %d Free descs = %d\n",
  2729. __func__, num_pool, num_desc);
  2730. return QDF_STATUS_SUCCESS;
  2731. }
/**
 * dp_tx_soc_attach() - attach soc to dp tx
 * @soc: core txrx main context
 *
 * This function attaches dp tx to the main device context,
 * allocating and initializing the dp tx resources.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail;

	dp_tx_flow_control_init(soc);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Allocate extension tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"MSDU Ext Desc Pool alloc %d failed %pK\n",
				i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s MSDU Ext Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_ext_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"TSO Desc Pool alloc %d failed %pK\n",
				i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Desc Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"TSO Num of seg Pool alloc %d failed %pK\n",
				i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s TSO Num of seg pool Alloc %d, descs = %d\n",
			__func__, num_pool, num_desc);

	/* Initialize descriptors in TCL Rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_tx_init_data_ring(soc->hal_soc,
					soc->tcl_data_ring[i].hal_srng);
		}
	}

	/*
	 * TODO: Add a runtime config option to enable this.
	 *
	 * Due to multiple issues on NPR EMU, this is enabled selectively
	 * only for NPR EMU; it should be removed once NPR platforms
	 * are stable.
	 */
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"%s HAL Tx init Success\n", __func__);

	return QDF_STATUS_SUCCESS;

fail:
	/* Detach will take care of freeing only allocated resources */
	dp_tx_soc_detach(soc);
	return QDF_STATUS_E_RESOURCES;
}
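
/*
 * Example (illustrative): typical usage from the soc init/deinit paths. On
 * failure dp_tx_soc_attach() has already unwound any partial allocations via
 * dp_tx_soc_detach(), so the caller only needs to propagate the status:
 *
 *	if (dp_tx_soc_attach(soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_RESOURCES;
 *	...
 *	dp_tx_soc_detach(soc);	// on teardown
 */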
/**
 * dp_tx_me_mem_free() - free memory allocated for mcast enhancement
 * @pdev: pointer to DP PDEV structure
 * @seg_info_head: pointer to the head of the segment list
 *
 * Return: void
 */
static void dp_tx_me_mem_free(struct dp_pdev *pdev,
		struct dp_tx_seg_info_s *seg_info_head)
{
	struct dp_tx_me_buf_t *mc_uc_buf;
	struct dp_tx_seg_info_s *seg_info_new = NULL;
	qdf_nbuf_t nbuf = NULL;
	uint64_t phy_addr;

	while (seg_info_head) {
		nbuf = seg_info_head->nbuf;
		mc_uc_buf = (struct dp_tx_me_buf_t *)
			seg_info_head->frags[0].vaddr;
		phy_addr = seg_info_head->frags[0].paddr_hi;
		phy_addr = (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
				phy_addr,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN);
		dp_tx_me_free_buf(pdev, mc_uc_buf);
		qdf_nbuf_free(nbuf);
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
}
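
/*
 * The hi/lo reassembly above mirrors the split performed when the segment is
 * built in dp_tx_me_send_convert_ucast() below:
 *
 *	frags[0].paddr_lo = (uint32_t)paddr_mcbuf;	 // low 32 bits
 *	frags[0].paddr_hi = (uint64_t)paddr_mcbuf >> 32; // upper bits
 *
 * e.g. for paddr_mcbuf = 0x123456780, paddr_lo = 0x23456780 and
 * paddr_hi = 0x1, so (0x1ULL << 32) | 0x23456780 recovers the full address.
 */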
/**
 * dp_tx_me_send_convert_ucast() - function to convert multicast to unicast
 * @vdev_handle: DP VDEV handle
 * @nbuf: Multicast nbuf
 * @newmac: Table of the clients to which packets have to be sent
 * @new_mac_cnt: No. of clients
 *
 * Return: no. of converted packets
 */
uint16_t
dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct ether_header *eh;
	uint8_t *data;
	uint16_t len;

	/* reference to frame dst addr */
	uint8_t *dstmac;
	/* copy of original frame src addr */
	uint8_t srcmac[DP_MAC_ADDR_LEN];

	/* local index into newmac */
	uint8_t new_mac_idx = 0;
	struct dp_tx_me_buf_t *mc_uc_buf;
	qdf_nbuf_t nbuf_clone;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s *seg_info_head = NULL;
	struct dp_tx_seg_info_s *seg_info_tail = NULL;
	struct dp_tx_seg_info_s *seg_info_new;
	struct dp_tx_frag_info_s data_frag;
	qdf_dma_addr_t paddr_data;
	qdf_dma_addr_t paddr_mcbuf = 0;
	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
	QDF_STATUS status;

	qdf_mem_set(&msdu_info, sizeof(msdu_info), 0x0);

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	eh = (struct ether_header *)qdf_nbuf_data(nbuf);
	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);

	len = qdf_nbuf_len(nbuf);
	data = qdf_nbuf_data(nbuf);

	status = qdf_nbuf_map(vdev->osdev, nbuf,
			QDF_DMA_TO_DEVICE);

	if (status) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Mapping failure Error:%d", status);
		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
		qdf_nbuf_free(nbuf);
		return 1;
	}

	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;

	/* preparing data fragment */
	data_frag.vaddr = data + IEEE80211_ADDR_LEN;
	data_frag.paddr_lo = (uint32_t)paddr_data;
	data_frag.paddr_hi = (((uint64_t)paddr_data) >> 32);
	data_frag.len = len - DP_MAC_ADDR_LEN;

	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
		dstmac = newmac[new_mac_idx];
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"added mac addr (%pM)", dstmac);

		/* Check for NULL Mac Address */
		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
			continue;

		/* frame to self mac. skip */
		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
			continue;

		/*
		 * TODO: optimize to avoid malloc in per-packet path
		 * For eg. seg_pool can be made part of vdev structure
		 */
		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));

		if (!seg_info_new) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"alloc failed");
			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
			goto fail_seg_alloc;
		}

		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
		if (mc_uc_buf == NULL)
			goto fail_buf_alloc;

		/*
		 * TODO: Check if we need to clone the nbuf
		 * Or can we just use the reference for all cases
		 */
		if (new_mac_idx < (new_mac_cnt - 1)) {
			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);

			if (nbuf_clone == NULL) {
				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
				goto fail_clone;
			}
		} else {
			/*
			 * Update the ref
			 * to account for frame sent without cloning
			 */
			qdf_nbuf_ref(nbuf);
			nbuf_clone = nbuf;
		}

		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);

		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
				&paddr_mcbuf);

		if (status) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					"Mapping failure Error:%d", status);
			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
			goto fail_map;
		}

		seg_info_new->frags[0].vaddr = (uint8_t *)mc_uc_buf;
		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
		seg_info_new->frags[0].paddr_hi =
			((uint64_t) paddr_mcbuf >> 32);
		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;

		seg_info_new->frags[1] = data_frag;
		seg_info_new->nbuf = nbuf_clone;
		seg_info_new->frag_cnt = 2;
		seg_info_new->total_len = len;

		seg_info_new->next = NULL;

		if (seg_info_head == NULL)
			seg_info_head = seg_info_new;
		else
			seg_info_tail->next = seg_info_new;

		seg_info_tail = seg_info_new;
	}

	if (!seg_info_head)
		goto free_return;

	msdu_info.u.sg_info.curr_seg = seg_info_head;
	msdu_info.num_seg = new_mac_cnt;
	msdu_info.frm_type = dp_tx_frm_me;

	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	while (seg_info_head->next) {
		seg_info_new = seg_info_head;
		seg_info_head = seg_info_head->next;
		qdf_mem_free(seg_info_new);
	}
	qdf_mem_free(seg_info_head);

	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return new_mac_cnt;

fail_map:
	qdf_nbuf_free(nbuf_clone);

fail_clone:
	dp_tx_me_free_buf(pdev, mc_uc_buf);

fail_buf_alloc:
	qdf_mem_free(seg_info_new);

fail_seg_alloc:
	dp_tx_me_mem_free(pdev, seg_info_head);

free_return:
	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
	return 1;
}
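
/*
 * Example (illustrative): converting one multicast frame into per-client
 * unicast copies. The destination table and client count would normally come
 * from the mcast-enhancement tables maintained by the control path; the
 * names below are hypothetical placeholders:
 *
 *	uint8_t clients[MAX_ME_CLIENTS][DP_MAC_ADDR_LEN];  // hypothetical
 *	uint8_t cnt = me_fill_client_table(vdev, clients); // hypothetical
 *	uint16_t sent;
 *
 *	sent = dp_tx_me_send_convert_ucast((struct cdp_vdev *)vdev,
 *					   mcast_nbuf, clients, cnt);
 *	// mcast_nbuf ownership passes to the callee, which maps, transmits
 *	// and frees it on both the success and failure paths.
 */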