dp_tx.c

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0xFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
	HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
	HAL_TX_ENCRYPT_TYPE_WEP_128,
	HAL_TX_ENCRYPT_TYPE_WEP_104,
	HAL_TX_ENCRYPT_TYPE_WEP_40,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
	HAL_TX_ENCRYPT_TYPE_WAPI,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4
};
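
/*
 * Illustrative usage sketch (editorial note, not part of the original
 * file): sec_type_map is indexed by the cdp_sec_type enum to obtain the
 * HAL encrypt type programmed into the TCL descriptor; see the
 * hal_tx_desc_set_encrypt_type() call in dp_tx_hw_enqueue() below.
 * Assuming cdp_sec_type_aes_ccmp is a valid index and "cached" points
 * at a TCL descriptor under construction:
 *
 *	enum cdp_sec_type sec = cdp_sec_type_aes_ccmp;
 *	hal_tx_desc_set_encrypt_type(cached, sec_type_map[sec]);
 */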

#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
}

#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
}
#endif //QCA_TX_LIMIT_CHECK

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;

		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->tso_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO num desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc : Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return:
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "Tx Completion Release desc %d status %d outstanding %d",
		  tx_desc->id, comp_status,
		  qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded off to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
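	/*
	 * Illustrative arithmetic (editorial note, not in the original):
	 * the "(x + 7) & ~0x7" idiom rounds x up to the next multiple of
	 * 8. For example, if the descriptor were 21 bytes:
	 *
	 *	(21 + 7) & ~0x7 = 24
	 *
	 * so 24 bytes of headroom are reserved and the metadata start
	 * stays 8-byte aligned, as the HW requires.
	 */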

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0])) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does. so, no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}

		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Error in filling HTT metadata");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *)&tso_seg->tso_flags;
	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx: Retrieve the tso packet id
 * @pdev - pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
		     % CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
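
/*
 * Illustrative note (editorial, not in the original): the atomic
 * increment-and-modulo above hands out TSO stats slots circularly. With
 * CDP_MAX_TSO_PACKETS == 8 (a value assumed here purely for
 * illustration), successive calls would yield:
 *
 *	1 % 8 = 1, 2 % 8 = 2, ... 7 % 8 = 7, 8 % 8 = 0, 9 % 8 = 1, ...
 *
 * i.e. older per-packet TSO stats are overwritten once the window of
 * CDP_MAX_TSO_PACKETS entries is exhausted.
 */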

/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
		tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso number segment descriptors (the list typically
	 * holds only one)
	 */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);
	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
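
/*
 * Illustrative note (editorial, not in the original): compile-time
 * asserts of this kind are conventionally built from a declaration whose
 * size turns negative when the predicate fails, e.g. a sketch (the real
 * QDF_COMPILE_TIME_ASSERT definition lives in the qdf headers and may
 * differ):
 *
 *	#define STATIC_ASSERT_SKETCH(name, pred) \
 *		typedef char name##_check[(pred) ? 1 : -1]
 *
 * Here it guarantees at build time that the meta_data[] scratch area
 * sized by DP_TX_MSDU_INFO_META_DATA_DWORDS can hold a full
 * struct htt_tx_msdu_desc_ext2_t.
 */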

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return:
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
				seg_info->frags[i].paddr_lo,
				seg_info->frags[i].paddr_hi,
				seg_info->frags[i].len);
		}
		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: Metadata to the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which
	 * include transmit rate, power, priority, channel, channel bandwidth,
	 * nss etc. These are filled in HTT MSDU descriptor and sent in frame
	 * pre-header. These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 * to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |       HTT Metadata          |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |       Alignment Pad         |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |        SKB Data             |        (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
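	/*
	 * Worked example (editorial note, not in the original): with the
	 * mask below, an skb whose data pointer ends in ...0x0B gives
	 * align_pad = 0x0B & 0x7 = 3. qdf_nbuf_push_head() then moves the
	 * head down 3 bytes to ...0x08, so the HTT metadata prepended in
	 * dp_tx_prepare_htt_metadata() starts on an 8-byte boundary, as
	 * the diagram above requires.
	 */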
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata &&
	     tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id : Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s Tx Extension Descriptor Alloc Fail",
			  __func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&soc->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return:
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* Continue only if frames are of DATA type */
	if (!DP_FRAME_IS_DATA(qos_wh)) {
		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Pkt. recd is of not data type");
		goto error;
	}

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (vdev->raw_mode_war &&
	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
	     curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
		if (QDF_STATUS_SUCCESS !=
			qdf_nbuf_map_nbytes_single(vdev->osdev,
						   curr_nbuf,
						   QDF_DMA_TO_DEVICE,
						   curr_nbuf->len)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s dma map error ", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			mapped_buf_num = i;
			goto error;
		}

		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *)curr_nbuf;

		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
						     QDF_DMA_TO_DEVICE,
						     curr_nbuf->len);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;
}

/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
				    qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur_nbuf = nbuf;

	do {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
					     QDF_DMA_TO_DEVICE,
					     cur_nbuf->len);
		cur_nbuf = qdf_nbuf_next(cur_nbuf);
	} while (cur_nbuf);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
{ \
	qdf_nbuf_t nbuf_local; \
	struct dp_vdev *vdev_local = vdev_hdl; \
	do { \
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
			break; \
		nbuf_local = nbuf; \
		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
				 htt_cmn_pkt_type_raw)) \
			break; \
		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
			break; \
		else if (qdf_nbuf_is_tso((nbuf_local))) \
			break; \
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
						       (nbuf_local), \
						       NULL, 1, 0); \
	} while (0); \
}
#else
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
#endif
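
/*
 * Illustrative note (editorial, not in the original): the do { ... }
 * while (0) wrapper above is the standard C idiom for making a
 * multi-statement macro behave like a single statement, and here it
 * also provides a scope that "break" can exit early, acting like a
 * local goto:
 *
 *	do {
 *		if (skip_condition)
 *			break;		// early exit, no goto label needed
 *		do_the_work();
 *	} while (0);
 *
 * Only linear, non-TSO, non-raw frames reach the protocol counter
 * update; everything else breaks out of the block.
 */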
  990. /**
  991. * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
  992. * @soc: DP Soc Handle
  993. * @vdev: DP vdev handle
  994. * @tx_desc: Tx Descriptor Handle
  995. * @tid: TID from HLOS for overriding default DSCP-TID mapping
  996. * @fw_metadata: Metadata to send to Target Firmware along with frame
  997. * @ring_id: Ring ID of H/W ring to which we enqueue the packet
  998. * @tx_exc_metadata: Handle that holds exception path meta data
  999. *
  1000. * Gets the next free TCL HW DMA descriptor and sets up required parameters
  1001. * from software Tx descriptor
  1002. *
  1003. * Return: QDF_STATUS_SUCCESS: success
  1004. * QDF_STATUS_E_RESOURCES: Error return
  1005. */
  1006. static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
  1007. struct dp_tx_desc_s *tx_desc, uint8_t tid,
  1008. uint16_t fw_metadata, uint8_t ring_id,
  1009. struct cdp_tx_exception_metadata
  1010. *tx_exc_metadata)
  1011. {
  1012. uint8_t type;
  1013. void *hal_tx_desc;
  1014. uint32_t *hal_tx_desc_cached;
  1015. /*
  1016. * Setting it initialization statically here to avoid
  1017. * a memset call jump with qdf_mem_set call
  1018. */
  1019. uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);
	/* Return Buffer Manager ID */
	uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
	hal_ring_handle_t hal_ring_hdl = NULL;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;

		if (tx_desc->msdu_ext_desc->flags &
		    DP_TX_EXT_DESC_FLAG_METADATA_VALID)
			tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
		else
			tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
	} else {
		tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
					tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	qdf_assert_always(tx_desc->dma_addr);

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 type);
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if (vdev->csum_enabled &&
	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
	     || qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
			 soc->wlan_cfg_ctx)))
		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
			 tx_desc->pkt_offset, tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return status;
	}

	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);

	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	status = QDF_STATUS_SUCCESS;

ring_access_fail:
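	/*
	 * Note on the branch below: hif_pm_runtime_get() returning 0 means
	 * the bus is awake, so the ring access can be ended immediately
	 * (committing the ring pointer to HW) and the runtime-PM reference
	 * is dropped right after. Otherwise the HW update is deferred: the
	 * ring is ended in "reap" mode, marked with HAL_SRNG_FLUSH_EVENT,
	 * and the flush counter is bumped so it is committed later.
	 */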
	if (hif_pm_runtime_get(soc->hif_handle,
			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
		hif_pm_runtime_put(soc->hif_handle,
				   RTPM_ID_DW_TX_HW_ENQUEUE);
	} else {
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	return status;
}
/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool (true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return false;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(qdf_ether_header_t));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;
		/* For encrypted packets don't do any classification */
		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
			return false;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
					 && (ether_type ==
					     qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_VLAN)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;

	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
			 || (qdf_nbuf_is_ipv4_pkt(nbuf)
			     && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			 || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone))
			qdf_nbuf_free(nbuf_clone);

		return true;
	}

	if (qdf_unlikely(nbuf_clone))
		qdf_nbuf_free(nbuf_clone);

	return false;
}
/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the obtained TID is stored
 *
 * Extract the DSCP or PCP information from frame and map into TID value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = (uint8_t *)(eh->ether_dhost);
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *)nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
	/*
	 * Check if packet is dot3 or eth2 type.
	 * Read the full 16-bit ethertype, consistent with dp_cce_classify();
	 * a plain byte dereference here would only pick up one octet.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				  sizeof(*llcHdr);
			ether_type = *(uint16_t *)(nbuf->data +
					2*QDF_MAC_ADDR_SIZE +
					sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				  sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *)eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *)L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/*
		 * TODO: use flowlabel
		 * IGMP/MLD cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *)L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}

	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
}
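/*
 * Worked example for the DSCP mapping in dp_tx_get_tid() above
 * (illustrative only; it assumes the common DP_IP_DSCP_SHIFT = 2 and
 * DP_IP_DSCP_MASK = 0x3f definitions, which are not shown in this file):
 *
 *	uint8_t tos = 0xb8;                       TOS byte for EF traffic
 *	uint8_t dscp = (tos >> 2) & 0x3f;         dscp = 0x2e (46, EF)
 *	tid = pdev->dscp_tid_map[map_id][dscp];
 *
 * The ECN bits (TOS bits 0-1) are shifted out, leaving the 6-bit DSCP
 * code point as the index into the per-pdev DSCP-TID map; the final
 * clamp above then keeps the result inside CDP_MAX_DATA_TIDS.
 */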
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the classified TID is stored
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
		return;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return;

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_soc *soc,
				    struct dp_vdev *vdev,
				    struct dp_tx_desc_s *tx_desc)
{
	if (vdev) {
		if (vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			vdev->is_tdls_frame = false;
		}
	}
}
/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
					 struct dp_tx_desc_s *tx_desc)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
						     DP_MOD_ID_TDLS);

	if (qdf_unlikely(!vdev)) {
		dp_err_rl("vdev is null!");
		goto error;
	}

	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
		return;
	} else {
		dp_err_rl("callback func is null");
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
error:
	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
}
/**
 * dp_tx_msdu_single_map() - do nbuf map
 * @vdev: DP vdev handle
 * @tx_desc: DP TX descriptor pointer
 * @nbuf: skb pointer
 *
 * For TDLS frames, use qdf_nbuf_map_single() so that the map stays
 * symmetric with the unmap operation done in other components.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
		return qdf_nbuf_map_nbytes_single(vdev->osdev,
						  nbuf,
						  QDF_DMA_TO_DEVICE,
						  nbuf->len);
	else
		return qdf_nbuf_map_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE);
}
#else
static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
						struct dp_tx_desc_s *tx_desc)
{
}

static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	return qdf_nbuf_map_nbytes_single(vdev->osdev,
					  nbuf,
					  QDF_DMA_TO_DEVICE,
					  nbuf->len);
}
#endif
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->mesh_vdev))
		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
}
/**
 * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = NULL;

	/*
	 * Take the vdev reference up front, so the completion_fw stat is
	 * actually counted; a NULL vdev in DP_STATS_INC is silently skipped.
	 */
	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
		qdf_nbuf_free(nbuf);
		if (vdev)
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
	} else {
		if (vdev && vdev->osif_tx_free_ext)
			vdev->osif_tx_free_ext((nbuf));
		else
			qdf_nbuf_free(nbuf);
	}
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
}
#else
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_frame_is_drop() - checks if the packet is loopback
 * @vdev: DP vdev handle
 * @srcmac: source MAC address
 * @dstmac: destination MAC address
 *
 * Return: 1 if frame needs to be dropped else 0
 */
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
{
	struct dp_pdev *pdev = NULL;
	struct dp_ast_entry *src_ast_entry = NULL;
	struct dp_ast_entry *dst_ast_entry = NULL;
	struct dp_soc *soc = NULL;

	qdf_assert(vdev);
	pdev = vdev->pdev;
	qdf_assert(pdev);
	soc = pdev->soc;

	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, dstmac, vdev->pdev->pdev_id);
	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, srcmac, vdev->pdev->pdev_id);

	if (dst_ast_entry && src_ast_entry) {
		if (dst_ast_entry->peer_id ==
		    src_ast_entry->peer_id)
			return 1;
	}

	return 0;
}
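/*
 * Minimal usage sketch for dp_tx_frame_is_drop() (illustrative only; the
 * calling function and the drop policy shown here are assumptions, not
 * code from this file). A transmit path can drop loopback frames whose
 * source and destination MAC resolve to the same peer:
 *
 *	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
 *
 *	if (dp_tx_frame_is_drop(vdev, eh->ether_shost, eh->ether_dhost)) {
 *		qdf_nbuf_free(nbuf);
 *		return;
 *	}
 */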
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info carrying the TID, fw metadata and Tx queue to be
 *	       used for this frame
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					     HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
						peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw)
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);

	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
					 !pdev->enhanced_stats_en);

	dp_tx_update_mesh_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
				  htt_tcl_metadata, tx_q->ring_id,
				  tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
			  __func__, tx_desc, tx_q->ring_id);
		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
					     QDF_DMA_TO_DEVICE,
					     nbuf->len);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tid_stats = &pdev->stats.tid_stats.
		    tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline
#else
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
				i++;
				continue;
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
					  htt_tcl_metadata, tx_q->ring_id,
					  NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
				  __func__, tx_desc, tx_q->ring_id);

			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			if (msdu_info->frm_type == dp_tx_frm_me) {
				i++;
				continue;
			}
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf.
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	nbuf = NULL;

done:
	return nbuf;
}
/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *                     for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: nbuf on success,
 *         NULL on dma-map failure (the nbuf is freed)
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS !=
		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE, nbuf->len)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "dma map error");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}

	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "frag dma map error");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}
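/*
 * Worked example for the paddr_lo/paddr_hi split used in
 * dp_tx_prepare_sg() (illustrative only; the sample address is made up).
 * Each fragment's DMA address is carried as two 32-bit words:
 *
 *	qdf_dma_addr_t paddr = 0x00000001a2b3c4d0ULL;
 *	uint32_t lo = (uint32_t)paddr;            lo = 0xa2b3c4d0
 *	uint32_t hi = ((uint64_t)paddr) >> 32;    hi = 0x00000001
 *
 * Also note that frags[0] describes the linear part of the nbuf, so the
 * scatter fragments land in frags[1..nr_frags]; that is why the loop
 * fills seg_info->frags[cur_frag + 1].
 */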
/**
 * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Return: None
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
				(msdu_info->meta_data[6], ppdu_cookie);

	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}
#ifdef MESH_MODE_SUPPORT

/**
 * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
 *				   and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_pull_head failed");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s , Meta hdr %0x %0x %0x %0x %0x %0x"
		  " tid %d to_fw %d",
		  __func__, msdu_info->meta_data[0],
		  msdu_info->meta_data[1],
		  msdu_info->meta_data[2],
		  msdu_info->meta_data[3],
		  msdu_info->meta_data[4],
		  msdu_info->meta_data[5],
		  msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
#endif
/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid else false
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
			    HTT_INVALID_TID);
	bool invalid_encap_type =
			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
			       tx_exc->ppdu_cookie == 0);

	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
	    invalid_cookie) {
		return false;
	}

	return true;
}
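/*
 * Minimal usage sketch for dp_check_exc_metadata() (illustrative only;
 * the field values shown are examples, not required settings). A caller
 * on the exception path fills cdp_tx_exception_metadata and rejects
 * invalid combinations before any descriptor is allocated:
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.tid = HTT_INVALID_TID;                 no TID override
 *	tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	tx_exc.sec_type = CDP_INVALID_SEC_TYPE;       use vdev->sec_type
 *	tx_exc.peer_id = peer_id;
 *
 *	if (!dp_check_exc_metadata(&tx_exc))
 *		return nbuf;
 *
 * Note the sniffer rule above: is_tx_sniffer == 1 with ppdu_cookie == 0
 * is rejected, since the cookie is what is replayed in the ppdu
 * completions.
 */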
/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	qdf_ether_header_t *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_EXCEPTION);

	if (qdf_unlikely(!vdev))
		goto fail;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	if (!tx_exc_metadata)
		goto fail;

	msdu_info.tid = tx_exc_metadata->tid;
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb %pM", nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");
		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}

	/* Mcast enhancement */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
		}
	}

	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
				 qdf_nbuf_len(nbuf));
		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
					       tx_exc_metadata->ppdu_cookie);
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * Check exception descriptors
	 */
	if (dp_tx_exception_limit_check(vdev))
		goto fail;

	/*  Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
				      tx_exc_metadata->peer_id,
				      tx_exc_metadata);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	return nbuf;

fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (!nbuf_mesh) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_unshare failed");
		return nbuf;
	}

	/*
	 * Track the unshared nbuf from here on; the original may already
	 * have been released by qdf_nbuf_unshare(), so the vdev-failure
	 * path below must return the unshared buffer, not the stale one.
	 */
	nbuf = nbuf_mesh;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "vdev is NULL for vdev_id %d", vdev_id);
		return nbuf;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
	    (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);

	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
	    !no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_clone failed");
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
	if ((!nbuf) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
	return nbuf;
}
#else
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	return dp_tx_send(soc, vdev_id, nbuf);
}
#endif
/**
 * dp_tx_nawds_handler() - NAWDS handler
 *
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @msdu_info: msdu_info required to create HTT metadata
 * @nbuf: skb
 *
 * This API transfers the multicast frames with the peer id
 * on NAWDS enabled peer.
 *
 * Return: none
 */
static inline
void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	uint16_t sa_peer_id = DP_INVALID_PEER;
	struct dp_ast_entry *ast_entry = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid
					(soc,
					 (uint8_t *)(eh->ether_shost),
					 vdev->pdev->pdev_id);
		if (ast_entry)
			sa_peer_id = ast_entry->peer_id;
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer->bss_peer && peer->nawds_enabled) {
			peer_id = peer->peer_id;
			/* Multicast packets need to be
			 * dropped in case of intra bss forwarding
			 */
			if (sa_peer_id == peer->peer_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  " %s: multicast packet", __func__);
				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
				continue;
			}
			nbuf_clone = qdf_nbuf_clone(nbuf);

			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("nbuf clone failed"));
				break;
			}

			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
							    msdu_info, peer_id,
							    NULL);

			if (nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  FL("pkt send failed"));
				qdf_nbuf_free(nbuf_clone);
			} else {
				if (peer_id != DP_INVALID_PEER)
					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
							 1, qdf_nbuf_len(nbuf));
			}
		}
	}

	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint16_t peer_id = HTT_INVALID_PEER;
	/*
	 * A memzero here would cost an additional function call, so the
	 * structure is cleared with static stack initialization instead.
	 */
	struct dp_tx_msdu_info_s msdu_info = {0};
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	/*
	 * dp_vdev_get_ref_by_id() does an atomic operation; avoid using
	 * it in the per-packet path.
	 *
	 * In this path the vdev memory is already protected by the netdev
	 * tx lock.
	 */
	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	dp_verbose_debug("skb %pM", nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								    &msdu_info);
		if (!nbuf_mesh) {
			dp_verbose_debug("Extracting mesh metadata failed");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					  qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			dp_verbose_debug("Mcast frm for ME %pK", vdev);

			DP_STATS_INC_PKT(vdev,
					 tx_i.mcast_en.mcast_pkt, 1,
					 qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("Raw frame %pK", vdev);

		goto send_multiple;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					  qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);

		peer_id = DP_INVALID_PEER;
		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				 1, qdf_nbuf_len(nbuf));
	}

	/*  Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);

	return nbuf;
}
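/*
 * Minimal caller sketch for dp_tx_send() (illustrative only; the wrapper
 * name and its handling are assumptions, not the actual OSIF call site).
 * The NULL-on-success contract means any non-NULL return is an unsent
 * nbuf that the caller still owns and must free or requeue:
 *
 *	static void osif_xmit_example(struct cdp_soc_t *soc_hdl,
 *				      uint8_t vdev_id, qdf_nbuf_t nbuf)
 *	{
 *		nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *		if (nbuf)
 *			qdf_nbuf_free(nbuf);
 *	}
 */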
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status)
{
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	qdf_assert(vdev);

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx reinject path", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_id != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 *   send ucast on that peer only
			 */
			    ((peer->bss_peer && num_peers_3addr && is_mcast) ||
			     (peer->wds_enabled &&
			      ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			       (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			    ((peer->bss_peer &&
			      !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
#endif
				peer_id = DP_INVALID_PEER;

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
								   nbuf_copy,
								   &msdu_info,
								   peer_id,
								   NULL);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				}
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
	}

	qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_tx_desc_s *tx_desc,
				  uint8_t *status)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path",
		  __func__);

	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency to be captured for the buffer
 *
 * This function is used to decide whether the buffer needs to be
 * sent to the stack for freeing or not.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;
	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);

	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (pdev->mcopy_mode) {
		/* If mcopy is enabled and mcopy_mode is M_COPY deliver 1st
		 * MSDU per PPDU. If mcopy_mode is M_COPY_EXTENDED deliver
		 * 1st MSDU for each MPDU
		 */
		if (pdev->mcopy_mode == M_COPY) {
			if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
			    (pdev->m_copy_id.tx_peer_id == peer_id)) {
				return QDF_STATUS_E_INVAL;
			}
		}

		if (!first_msdu)
			return QDF_STATUS_E_INVAL;

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
		if (!netbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("No headroom"));
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;
	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = time_latency;
	}

	return QDF_STATUS_SUCCESS;
}
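/*
 * Illustrative layout note (an inference from the code above, not a
 * definition from this file): after the qdf_nbuf_push_head(), the buffer
 * delivered to the stack starts with the tx_capture_hdr, followed by the
 * original frame:
 *
 *	| tx_capture_hdr (ta, ra, ppdu_id, peer_id, ...) | original MSDU |
 *
 * A consumer would pull sizeof(struct tx_capture_hdr) bytes to recover
 * the frame data.
 */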
/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
                                 uint16_t peer_id, uint32_t ppdu_id,
                                 qdf_nbuf_t netbuf)
{
    dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
                         netbuf, peer_id,
                         WDI_NO_VAL, pdev->pdev_id);
}
#else
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
                                       struct dp_pdev *pdev,
                                       struct dp_peer *peer,
                                       struct hal_tx_completion_status *ts,
                                       qdf_nbuf_t netbuf,
                                       uint64_t time_latency)
{
    return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
                            uint16_t peer_id, uint32_t ppdu_id,
                            qdf_nbuf_t netbuf)
{
}
#endif
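
/*
 * Illustrative sketch (not part of the driver): the grow-headroom-then-push
 * pattern used by dp_get_completion_indication_for_stack() above, recast as
 * stand-alone C so the buffer arithmetic is visible. All names here
 * (toy_buf, toy_push_head, ...) are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct toy_buf {
    uint8_t *head;  /* start of the allocation */
    uint8_t *data;  /* start of the payload; headroom = data - head */
    size_t len;     /* payload length */
    size_t size;    /* total allocation size */
};

/* Mirror of qdf_nbuf_push_head(): fail when there is not enough headroom,
 * otherwise move data back and grow len. */
static uint8_t *toy_push_head(struct toy_buf *b, size_t n)
{
    if ((size_t)(b->data - b->head) < n)
        return NULL;
    b->data -= n;
    b->len += n;
    return b->data;
}

/* Mirror of qdf_nbuf_realloc_headroom(): reallocate with n extra bytes in
 * front and shift the payload right so later pushes succeed. */
static int toy_realloc_headroom(struct toy_buf *b, size_t n)
{
    size_t headroom = (size_t)(b->data - b->head);
    uint8_t *bigger;

    if (headroom >= n)
        return 0;               /* enough room already */

    bigger = malloc(b->size + n);
    if (!bigger)
        return -1;
    /* copy the payload to the same tail position, leaving n extra bytes
     * of headroom in front of it */
    memcpy(bigger + headroom + n, b->data, b->len);
    free(b->head);
    b->head = bigger;
    b->data = bigger + headroom + n;
    b->size += n;
    return 0;
}

/* Same control flow as the driver: grow headroom if needed, then push a
 * fixed-size capture header and fill it in place. */
static int toy_prepend_hdr(struct toy_buf *b, const uint8_t hdr[8])
{
    if ((size_t)(b->data - b->head) < 8 && toy_realloc_headroom(b, 8))
        return -1;              /* E_NOMEM */
    if (!toy_push_head(b, 8))
        return -1;
    memcpy(b->data, hdr, 8);
    return 0;
}
#endif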
/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
                                       struct dp_tx_desc_s *desc)
{
    qdf_nbuf_t nbuf = desc->nbuf;

    /* nbuf already freed in vdev detach path */
    if (!nbuf)
        return;

    /* If it is TDLS mgmt, don't unmap or free the frame */
    if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
        return dp_non_std_tx_comp_free_buff(soc, desc);

    /* 0 : MSDU buffer, 1 : MLE */
    if (desc->msdu_ext_desc) {
        /* TSO free */
        if (hal_tx_ext_desc_get_tso_enable(
                    desc->msdu_ext_desc->vaddr)) {
            /* unmap each TSO seg before freeing the nbuf */
            dp_tx_tso_unmap_segment(soc, desc->tso_desc,
                                    desc->tso_num_desc);
            qdf_nbuf_free(nbuf);
            return;
        }
    }

    qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
                                 QDF_DMA_TO_DEVICE, nbuf->len);

    if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
        return dp_mesh_tx_comp_free_buff(soc, desc);

    qdf_nbuf_free(nbuf);
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
                                         struct hal_tx_completion_status *ts)
{
    struct meta_hdr_s *mhdr;
    qdf_nbuf_t netbuf = tx_desc->nbuf;

    if (!tx_desc->msdu_ext_desc) {
        if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "netbuf %pK offset %d",
                      netbuf, tx_desc->pkt_offset);
            return;
        }
    }

    if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "netbuf %pK offset %lu", netbuf,
                  sizeof(struct meta_hdr_s));
        return;
    }

    mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
    mhdr->rssi = ts->ack_frame_rssi;
    mhdr->band = tx_desc->pdev->operating_channel.band;
    mhdr->channel = tx_desc->pdev->operating_channel.num;
}
#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
                                         struct hal_tx_completion_status *ts)
{
}
#endif
#ifdef QCA_PEER_EXT_STATS
/*
 * dp_tx_compute_tid_delay() - Compute per TID delay
 * @stats: Per TID delay stats
 * @tx_desc: Software Tx descriptor
 *
 * Compute the software enqueue and hw enqueue delays and
 * update the respective histograms
 *
 * Return: void
 */
static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
                                    struct dp_tx_desc_s *tx_desc)
{
    struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
    int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
    uint32_t sw_enqueue_delay, fwhw_transmit_delay;

    current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
    timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
    timestamp_hw_enqueue = tx_desc->timestamp;
    sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
    fwhw_transmit_delay = (uint32_t)(current_timestamp -
                                     timestamp_hw_enqueue);

    /*
     * Update the Tx software enqueue delay and the HW
     * enqueue-to-completion delay.
     */
    dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
    dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
}

/*
 * dp_tx_update_peer_ext_stats() - Update the peer extended stats
 * @peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @tid: Transmission ID
 * @ring_id: CPU context ID/CPU_ID
 *
 * Update the peer extended stats. These are enhanced per-msdu
 * delay stats maintained on top of the regular delay stats.
 *
 * Return: void
 */
static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
                                        struct dp_tx_desc_s *tx_desc,
                                        uint8_t tid, uint8_t ring_id)
{
    struct dp_pdev *pdev = peer->vdev->pdev;
    struct dp_soc *soc = NULL;
    struct cdp_peer_ext_stats *pext_stats = NULL;

    soc = pdev->soc;
    if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
        return;

    pext_stats = peer->pext_stats;

    qdf_assert(pext_stats);
    qdf_assert(ring_id < CDP_MAX_TXRX_CTX);

    /*
     * For non-data TIDs, fold into the last data TID
     * (CDP_MAX_DATA_TIDS - 1)
     */
    if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
        tid = CDP_MAX_DATA_TIDS - 1;

    dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
                            tx_desc);
}
#else
static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
                                               struct dp_tx_desc_s *tx_desc,
                                               uint8_t tid, uint8_t ring_id)
{
}
#endif
/**
 * dp_tx_compute_delay() - Compute and fill in the per-packet delay stats
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
static void dp_tx_compute_delay(struct dp_vdev *vdev,
                                struct dp_tx_desc_s *tx_desc,
                                uint8_t tid, uint8_t ring_id)
{
    int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
    uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;

    if (qdf_likely(!vdev->pdev->delay_stats_flag))
        return;

    current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
    timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
    timestamp_hw_enqueue = tx_desc->timestamp;
    sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
    fwhw_transmit_delay = (uint32_t)(current_timestamp -
                                     timestamp_hw_enqueue);
    interframe_delay = (uint32_t)(timestamp_ingress -
                                  vdev->prev_tx_enq_tstamp);

    /*
     * Delay in software enqueue
     */
    dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
                          CDP_DELAY_STATS_SW_ENQ, ring_id);
    /*
     * Delay between packet enqueued to HW and Tx completion
     */
    dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
                          CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);

    /*
     * Update interframe delay stats calculated at the hardstart receive
     * point. vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so the
     * interframe delay will not be calculated correctly for the 1st
     * frame. On the other hand, this avoids an extra per-packet check of
     * !vdev->prev_tx_enq_tstamp.
     */
    dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
                          CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
    vdev->prev_tx_enq_tstamp = timestamp_ingress;
}
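
/*
 * Illustrative sketch (not part of the driver): the three delay figures
 * computed by dp_tx_compute_delay() above, boiled down to plain C. The
 * timestamps (all in ms) are: ingress (frame handed to the driver),
 * hw_enqueue (frame posted to the HW ring) and now (Tx completion). All
 * names are hypothetical.
 */
#if 0
#include <stdint.h>

struct toy_tx_delays {
    uint32_t sw_enqueue;     /* ingress -> HW ring */
    uint32_t fwhw_transmit;  /* HW ring -> completion */
    uint32_t interframe;     /* previous ingress -> this ingress */
};

static struct toy_tx_delays
toy_compute_delays(int64_t ingress_ms, int64_t hw_enqueue_ms,
                   int64_t now_ms, int64_t *prev_ingress_ms)
{
    struct toy_tx_delays d;

    d.sw_enqueue = (uint32_t)(hw_enqueue_ms - ingress_ms);
    d.fwhw_transmit = (uint32_t)(now_ms - hw_enqueue_ms);
    /* The first frame sees *prev_ingress_ms == 0, so its interframe value
     * is garbage; the driver accepts that to avoid a per-packet branch. */
    d.interframe = (uint32_t)(ingress_ms - *prev_ingress_ms);
    *prev_ingress_ms = ingress_ms;
    return d;
}
#endif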
#ifdef DISABLE_DP_STATS
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
}
#else
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
    enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

    DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
    if (subtype != QDF_PROTO_INVALID)
        DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
}
#endif
/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *                             per wbm ring
 *
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
                        struct hal_tx_completion_status *ts,
                        struct dp_peer *peer, uint8_t ring_id)
{
    struct dp_pdev *pdev = peer->vdev->pdev;
    struct dp_soc *soc = NULL;
    uint8_t mcs, pkt_type;
    uint8_t tid = ts->tid;
    uint32_t length;
    struct cdp_tid_tx_stats *tid_stats;

    if (!pdev)
        return;

    if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
        tid = CDP_MAX_DATA_TIDS - 1;

    tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
    soc = pdev->soc;

    mcs = ts->mcs;
    pkt_type = ts->pkt_type;

    if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
        dp_err("Release source is not from TQM");
        return;
    }

    length = qdf_nbuf_len(tx_desc->nbuf);
    DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

    if (qdf_unlikely(pdev->delay_stats_flag))
        dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
    DP_STATS_INCC(peer, tx.dropped.age_out, 1,
                  (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
    DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
                      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
    DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
                  (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
    DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
                  (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
    DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
                  (ts->status == HAL_TX_TQM_RR_FW_REASON1));
    DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
                  (ts->status == HAL_TX_TQM_RR_FW_REASON2));
    DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
                  (ts->status == HAL_TX_TQM_RR_FW_REASON3));

    /*
     * tx_failed is ideally supposed to be updated from HTT ppdu
     * completion stats. But in IPQ807X/IPQ6018 chipsets, owing to a hw
     * limitation, there are no completions for failed cases, so tx_failed
     * is updated from the data path instead. Note that if tx_failed is
     * ever fixed to be derived from ppdu stats, this has to be removed.
     */
    peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
                               peer->stats.tx.dropped.fw_rem_notx +
                               peer->stats.tx.dropped.fw_rem_tx +
                               peer->stats.tx.dropped.age_out +
                               peer->stats.tx.dropped.fw_reason1 +
                               peer->stats.tx.dropped.fw_reason2 +
                               peer->stats.tx.dropped.fw_reason3;

    if (ts->status < CDP_MAX_TX_TQM_STATUS) {
        tid_stats->tqm_status_cnt[ts->status]++;
    }

    if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
        dp_update_no_ack_stats(tx_desc->nbuf, peer);
        return;
    }

    DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
    DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
    DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);

    /*
     * Following Rate Statistics are updated from HTT PPDU events from FW.
     * Return from here if HTT PPDU events are enabled.
     */
    if (!(soc->process_tx_status))
        return;

    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                  ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
                  ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                  ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
                  ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                  ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
                  ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                  ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
                  ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
                  ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
    DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
                  ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));

    DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
    DP_STATS_INC(peer, tx.bw[ts->bw], 1);
    DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
    DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
    DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
    DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
    DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
    dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
                         &peer->stats, ts->peer_id,
                         UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
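
/*
 * Illustrative sketch (not part of the driver): the DP_STATS_INCC pairs
 * above implement "clamp into the last bucket" MCS accounting per PHY
 * mode: a valid MCS increments its own counter, anything at or above the
 * mode's maximum (MAX_MCS_11A, MAX_MCS_11B, ...) falls into
 * mcs_count[MAX_MCS - 1]. Stand-alone form with hypothetical names and
 * limits:
 */
#if 0
#include <stdint.h>

#define TOY_MAX_MCS 13              /* size of the counter array (assumed) */

struct toy_rate_stats {
    uint32_t mcs_count[TOY_MAX_MCS];
};

/* max_mcs_for_mode plays the role of the per-mode MAX_MCS_11x constants
 * (actual values assumed/not reproduced here). */
static void toy_count_mcs(struct toy_rate_stats *s, uint8_t mcs,
                          uint8_t max_mcs_for_mode)
{
    if (mcs < max_mcs_for_mode)
        s->mcs_count[mcs]++;                 /* valid rate bucket */
    else
        s->mcs_count[TOY_MAX_MCS - 1]++;     /* overflow bucket */
}
#endif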
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
                          struct dp_tx_desc_s *tx_desc)
{
    struct dp_tx_desc_pool_s *pool;
    uint8_t desc_pool_id;

    desc_pool_id = tx_desc->pool_id;
    pool = &soc->tx_desc[desc_pool_id];

    qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
                            struct dp_tx_desc_s *tx_desc)
{
    struct dp_tx_desc_pool_s *pool;
    uint8_t desc_pool_id;

    desc_pool_id = tx_desc->pool_id;
    pool = &soc->tx_desc[desc_pool_id];

    qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: tx status
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
                                           struct dp_vdev *vdev,
                                           struct dp_tx_desc_s *tx_desc,
                                           qdf_nbuf_t netbuf,
                                           uint8_t status)
{
    void *osif_dev;
    ol_txrx_completion_fp tx_compl_cbk = NULL;
    uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);

    qdf_assert(tx_desc);

    dp_tx_flow_pool_lock(soc, tx_desc);

    if (!vdev ||
        !vdev->osif_vdev) {
        dp_tx_flow_pool_unlock(soc, tx_desc);
        return;
    }

    osif_dev = vdev->osif_vdev;
    tx_compl_cbk = vdev->tx_comp;
    dp_tx_flow_pool_unlock(soc, tx_desc);

    if (status == HAL_TX_TQM_RR_FRAME_ACKED)
        flag |= BIT(QDF_TX_RX_STATUS_OK);

    if (tx_compl_cbk)
        tx_compl_cbk(netbuf, osif_dev, flag);
}
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
                                               struct dp_peer *peer,
                                               uint8_t tid,
                                               uint64_t txdesc_ts,
                                               uint32_t ppdu_id)
{
    uint64_t delta_ms;
    struct cdp_tx_sojourn_stats *sojourn_stats;

    if (qdf_unlikely(pdev->enhanced_stats_en == 0))
        return;

    if (qdf_unlikely(tid == HTT_INVALID_TID ||
                     tid >= CDP_DATA_TID_MAX))
        return;

    if (qdf_unlikely(!pdev->sojourn_buf))
        return;

    sojourn_stats = (struct cdp_tx_sojourn_stats *)
        qdf_nbuf_data(pdev->sojourn_buf);

    sojourn_stats->cookie = (void *)peer->wlanstats_ctx;

    delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
               txdesc_ts;
    qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
                        delta_ms);
    sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
    sojourn_stats->num_msdus[tid] = 1;
    sojourn_stats->avg_sojourn_msdu[tid].internal =
        peer->avg_sojourn_msdu[tid].internal;
    dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
                         pdev->sojourn_buf, HTT_INVALID_PEER,
                         WDI_NO_VAL, pdev->pdev_id);
    sojourn_stats->sum_sojourn_msdu[tid] = 0;
    sojourn_stats->num_msdus[tid] = 0;
    sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
                                               struct dp_peer *peer,
                                               uint8_t tid,
                                               uint64_t txdesc_ts,
                                               uint32_t ppdu_id)
{
}
#endif
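
/*
 * Illustrative sketch (not part of the driver): qdf_ewma_tx_lag_add() keeps
 * an exponentially weighted moving average in fixed point, which is why the
 * code above copies the raw ".internal" field into the WDI buffer. A minimal
 * stand-alone EWMA along those lines; the names, the 1/8 weight and the 10
 * fractional bits are all assumptions, not the real qdf parameters.
 */
#if 0
#include <stdint.h>

struct toy_ewma {
    uint64_t internal;            /* average << TOY_EWMA_FRAC_BITS */
};

#define TOY_EWMA_FRAC_BITS 10     /* fixed-point precision (assumed) */
#define TOY_EWMA_WEIGHT_RCP 8     /* new sample weighs 1/8 (assumed) */

static void toy_ewma_add(struct toy_ewma *e, uint64_t sample_ms)
{
    uint64_t scaled = sample_ms << TOY_EWMA_FRAC_BITS;

    if (!e->internal) {           /* first sample seeds the average */
        e->internal = scaled;
    } else {
        e->internal -= e->internal / TOY_EWMA_WEIGHT_RCP;
        e->internal += scaled / TOY_EWMA_WEIGHT_RCP;
    }
}

static uint64_t toy_ewma_read(const struct toy_ewma *e)
{
    return e->internal >> TOY_EWMA_FRAC_BITS;
}
#endif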
/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
 *
 * Return: none
 */
static inline void
dp_tx_comp_process_desc(struct dp_soc *soc,
                        struct dp_tx_desc_s *desc,
                        struct hal_tx_completion_status *ts,
                        struct dp_peer *peer)
{
    uint64_t time_latency = 0;

    /*
     * m_copy/tx_capture modes are not supported for
     * scatter gather packets
     */
    if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
        time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
                        desc->timestamp);
    }

    if (!(desc->msdu_ext_desc)) {
        if (QDF_STATUS_SUCCESS ==
            dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
            return;
        }

        if (QDF_STATUS_SUCCESS ==
            dp_get_completion_indication_for_stack(soc,
                                                   desc->pdev,
                                                   peer, ts,
                                                   desc->nbuf,
                                                   time_latency)) {
            qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                         QDF_DMA_TO_DEVICE,
                                         desc->nbuf->len);
            dp_send_completion_to_stack(soc,
                                        desc->pdev,
                                        ts->peer_id,
                                        ts->ppdu_id,
                                        desc->nbuf);
            return;
        }
    }

    dp_tx_comp_free_buf(soc, desc);
}

#ifdef DISABLE_DP_STATS
/**
 * dp_tx_update_connectivity_stats() - update tx connectivity stats
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @status: tx status
 *
 * Return: none
 */
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
                                     struct dp_vdev *vdev,
                                     struct dp_tx_desc_s *tx_desc,
                                     uint8_t status)
{
}
#else
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
                                     struct dp_vdev *vdev,
                                     struct dp_tx_desc_s *tx_desc,
                                     uint8_t status)
{
    void *osif_dev;
    ol_txrx_stats_rx_fp stats_cbk;
    uint8_t pkt_type;

    qdf_assert(tx_desc);

    if (!vdev ||
        !vdev->osif_vdev ||
        !vdev->stats_cb)
        return;

    osif_dev = vdev->osif_vdev;
    stats_cbk = vdev->stats_cb;

    stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
    if (status == HAL_TX_TQM_RR_FRAME_ACKED)
        stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
                  &pkt_type);
}
#endif
/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @soc: DP soc handle
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: none
 */
static inline
void dp_tx_comp_process_tx_status(struct dp_soc *soc,
                                  struct dp_tx_desc_s *tx_desc,
                                  struct hal_tx_completion_status *ts,
                                  struct dp_peer *peer, uint8_t ring_id)
{
    uint32_t length;
    qdf_ether_header_t *eh;
    struct dp_vdev *vdev = NULL;
    qdf_nbuf_t nbuf = tx_desc->nbuf;
    uint8_t dp_status;

    if (!nbuf) {
        dp_info_rl("invalid tx descriptor. nbuf NULL");
        goto out;
    }

    eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
    length = qdf_nbuf_len(nbuf);
    dp_status = qdf_dp_get_status_from_htt(ts->status);
    DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
                             QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(nbuf),
                             sizeof(qdf_nbuf_data(nbuf)),
                             tx_desc->id,
                             dp_status));

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
              "-------------------- \n"
              "Tx Completion Stats: \n"
              "-------------------- \n"
              "ack_frame_rssi = %d \n"
              "first_msdu = %d \n"
              "last_msdu = %d \n"
              "msdu_part_of_amsdu = %d \n"
              "rate_stats valid = %d \n"
              "bw = %d \n"
              "pkt_type = %d \n"
              "stbc = %d \n"
              "ldpc = %d \n"
              "sgi = %d \n"
              "mcs = %d \n"
              "ofdma = %d \n"
              "tones_in_ru = %d \n"
              "tsf = %d \n"
              "ppdu_id = %d \n"
              "transmit_cnt = %d \n"
              "tid = %d \n"
              "peer_id = %d\n",
              ts->ack_frame_rssi, ts->first_msdu,
              ts->last_msdu, ts->msdu_part_of_amsdu,
              ts->valid, ts->bw, ts->pkt_type, ts->stbc,
              ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
              ts->tones_in_ru, ts->tsf, ts->ppdu_id,
              ts->transmit_cnt, ts->tid, ts->peer_id);

    /* Update SoC level stats */
    DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
                  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

    if (!peer) {
        dp_err_rl("peer is null or deletion in progress");
        DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
        goto out;
    }
    vdev = peer->vdev;

    dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);

    /* Update per-packet stats for mesh mode */
    if (qdf_unlikely(vdev->mesh_vdev) &&
        !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
        dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

    /* Update peer level stats */
    if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
        if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
            DP_STATS_INC_PKT(peer, tx.mcast, 1, length);

            if ((peer->vdev->tx_encap_type ==
                 htt_cmn_pkt_type_ethernet) &&
                QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
                DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
            }
        }
    } else {
        DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
        if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
            DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
            if (qdf_unlikely(peer->in_twt)) {
                DP_STATS_INC_PKT(peer,
                                 tx.tx_success_twt,
                                 1, length);
            }
        }
    }

    dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
    dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);

#ifdef QCA_SUPPORT_RDK_STATS
    if (soc->wlanstats_enabled)
        dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
                                    tx_desc->timestamp,
                                    ts->ppdu_id);
#endif

out:
    return;
}
/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
                             struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
    struct dp_tx_desc_s *desc;
    struct dp_tx_desc_s *next;
    struct hal_tx_completion_status ts;
    struct dp_peer *peer;
    qdf_nbuf_t netbuf;

    desc = comp_head;

    while (desc) {
        if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
            struct dp_pdev *pdev = desc->pdev;

            peer = dp_peer_get_ref_by_id(soc, desc->peer_id,
                                         DP_MOD_ID_TX_COMP);
            if (qdf_likely(peer)) {
                /*
                 * Increment peer statistics
                 * Minimal statistics update done here
                 */
                DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
                                 desc->length);

                if (desc->tx_status !=
                    HAL_TX_TQM_RR_FRAME_ACKED)
                    peer->stats.tx.tx_failed++;

                dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
            }

            qdf_assert(pdev);
            dp_tx_outstanding_dec(pdev);

            /*
             * Calling a QDF wrapper here would have a significant
             * performance impact, so the wrapper call is avoided
             */
            next = desc->next;
            qdf_mem_unmap_nbytes_single(soc->osdev,
                                        desc->dma_addr,
                                        QDF_DMA_TO_DEVICE,
                                        desc->length);
            qdf_nbuf_free(desc->nbuf);
            dp_tx_desc_free(soc, desc, desc->pool_id);
            desc = next;
            continue;
        }

        hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

        peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
                                     DP_MOD_ID_TX_COMP);
        dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);

        netbuf = desc->nbuf;

        /* check tx complete notification */
        if (peer &&
            QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
            dp_tx_notify_completion(soc, peer->vdev, desc,
                                    netbuf, ts.status);

        dp_tx_comp_process_desc(soc, desc, &ts, peer);

        if (peer)
            dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);

        next = desc->next;

        dp_tx_desc_release(desc, desc->pool_id);
        desc = next;
    }
}
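
/*
 * Illustrative sketch (not part of the driver): the reap-then-process shape
 * used above. Completion handling links descriptors into a singly linked
 * list while the HW ring is being drained, then walks the list afterwards
 * so the per-descriptor work happens outside the ring access window. Names
 * are hypothetical.
 */
#if 0
#include <stddef.h>

struct toy_desc {
    struct toy_desc *next;
    int status;
};

/* Walk and consume a batch that was linked up during ring reaping. The
 * next pointer is saved before release(), matching the driver's pattern,
 * because release() may recycle the descriptor. */
static void toy_process_list(struct toy_desc *head,
                             void (*process)(struct toy_desc *),
                             void (*release)(struct toy_desc *))
{
    struct toy_desc *desc = head;
    struct toy_desc *next;

    while (desc) {
        next = desc->next;
        process(desc);
        release(desc);
        desc = next;
    }
}
#endif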
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
                                  uint8_t ring_id)
{
    uint8_t tx_status;
    struct dp_pdev *pdev;
    struct dp_vdev *vdev;
    struct dp_soc *soc;
    struct hal_tx_completion_status ts = {0};
    uint32_t *htt_desc = (uint32_t *)status;
    struct dp_peer *peer;
    struct cdp_tid_tx_stats *tid_stats = NULL;
    struct htt_soc *htt_handle;

    /*
     * If the descriptor is already freed in vdev_detach,
     * continue to next descriptor
     */
    if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_INFO,
                  "Descriptor freed in vdev_detach %d",
                  tx_desc->id);
        return;
    }

    pdev = tx_desc->pdev;
    soc = pdev->soc;

    if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
        QDF_TRACE(QDF_MODULE_ID_DP,
                  QDF_TRACE_LEVEL_INFO,
                  "pdev in down state %d",
                  tx_desc->id);
        dp_tx_comp_free_buf(soc, tx_desc);
        dp_tx_desc_release(tx_desc, tx_desc->pool_id);
        return;
    }

    qdf_assert(tx_desc->pdev);

    vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
                                 DP_MOD_ID_HTT_COMP);

    if (!vdev)
        return;

    tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
    htt_handle = (struct htt_soc *)soc->htt_handle;
    htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

    switch (tx_status) {
    case HTT_TX_FW2WBM_TX_STATUS_OK:
    case HTT_TX_FW2WBM_TX_STATUS_DROP:
    case HTT_TX_FW2WBM_TX_STATUS_TTL:
    {
        uint8_t tid;

        if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
            ts.peer_id =
                HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
                        htt_desc[2]);
            ts.tid =
                HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
                        htt_desc[2]);
        } else {
            ts.peer_id = HTT_INVALID_PEER;
            ts.tid = HTT_INVALID_TID;
        }
        ts.ppdu_id =
            HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
                    htt_desc[1]);
        ts.ack_frame_rssi =
            HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
                    htt_desc[1]);

        ts.tsf = htt_desc[3];
        ts.first_msdu = 1;
        ts.last_msdu = 1;
        tid = ts.tid;
        if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
            tid = CDP_MAX_DATA_TIDS - 1;

        tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

        if (qdf_unlikely(pdev->delay_stats_flag))
            dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
        if (tx_status < CDP_MAX_TX_HTT_STATUS) {
            tid_stats->htt_status_cnt[tx_status]++;
        }

        peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
                                     DP_MOD_ID_HTT_COMP);

        dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
        dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
        dp_tx_desc_release(tx_desc, tx_desc->pool_id);

        if (qdf_likely(peer))
            dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);

        break;
    }
    case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
    {
        dp_tx_reinject_handler(soc, vdev, tx_desc, status);
        break;
    }
    case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
    {
        dp_tx_inspect_handler(soc, vdev, tx_desc, status);
        break;
    }
    case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
    {
        dp_tx_mec_handler(vdev, status);
        break;
    }
    default:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "%s Invalid HTT tx_status %d\n",
                  __func__, tx_status);
        break;
    }

    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
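
/*
 * Illustrative sketch (not part of the driver): the HTT_TX_WBM_COMPLETION_*
 * GET macros used above are mask-and-shift accessors over an array of
 * 32-bit words. A generic version of that pattern, with hypothetical field
 * positions (the real offsets live in the HTT headers):
 */
#if 0
#include <stdint.h>

/* Extract "width" bits starting at "lsb" from word (width must be < 32). */
static inline uint32_t toy_get_field(uint32_t word, unsigned int lsb,
                                     unsigned int width)
{
    return (word >> lsb) & ((1u << width) - 1);
}

/* Hypothetical layout: peer id in bits 0..15 and tid in bits 16..19 of
 * word 2 of the completion descriptor. */
static void toy_parse_completion(const uint32_t desc[4],
                                 uint16_t *peer_id, uint8_t *tid)
{
    *peer_id = (uint16_t)toy_get_field(desc[2], 0, 16);
    *tid = (uint8_t)toy_get_field(desc[2], 16, 4);
}
#endif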
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
    bool limit_hit = false;
    struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

    limit_hit =
        (num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;

    if (limit_hit)
        DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

    return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
    return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
    return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
    return false;
}
#endif
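
/*
 * Illustrative sketch (not part of the driver): how a per-loop packet limit
 * like dp_tx_comp_loop_pkt_limit_hit() above bounds the time spent in one
 * softirq pass. The caller re-arms the loop (the "more_data" label in
 * dp_tx_comp_handler() below) only if the ring still has entries and the
 * scheduler allows it. Names are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* Process up to "limit" entries; report whether we stopped early so the
 * caller can decide between yielding and looping again. reap_one() is a
 * hypothetical callback that returns 0 while entries remain. */
static bool toy_reap_with_limit(int (*reap_one)(void *), void *ring,
                                uint32_t limit, uint32_t *num_reaped)
{
    uint32_t count = 0;

    while (count < limit && reap_one(ring) == 0)
        count++;

    *num_reaped = count;
    return count >= limit;   /* true: limit hit, work may remain */
}
#endif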
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
                            hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
                            uint32_t quota)
{
    void *tx_comp_hal_desc;
    uint8_t buffer_src;
    uint8_t pool_id;
    uint32_t tx_desc_id;
    struct dp_tx_desc_s *tx_desc = NULL;
    struct dp_tx_desc_s *head_desc = NULL;
    struct dp_tx_desc_s *tail_desc = NULL;
    uint32_t num_processed = 0;
    uint32_t count;
    uint32_t num_avail_for_reap = 0;
    bool force_break = false;

    DP_HIST_INIT();

more_data:
    /* Re-initialize local variables to be re-used */
    head_desc = NULL;
    tail_desc = NULL;
    count = 0;

    if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
        dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
        return 0;
    }

    num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc,
                                                hal_ring_hdl, 0);

    if (num_avail_for_reap >= quota)
        num_avail_for_reap = quota;

    dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);

    /* Find head descriptor from completion ring */
    while (qdf_likely(num_avail_for_reap--)) {

        tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
        if (qdf_unlikely(!tx_comp_hal_desc))
            break;
        buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

        /* If this buffer was not released by TQM or FW, then it is
         * not a Tx completion indication; assert */
        if (qdf_unlikely(buffer_src !=
                         HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
            (qdf_unlikely(buffer_src !=
                          HAL_TX_COMP_RELEASE_SOURCE_FW))) {
            uint8_t wbm_internal_error;

            dp_err_rl(
                "Tx comp release_src != TQM | FW but from %d",
                buffer_src);
            hal_dump_comp_desc(tx_comp_hal_desc);
            DP_STATS_INC(soc, tx.invalid_release_source, 1);

            /* When WBM sees NULL buffer_addr_info in any of
             * ingress rings it sends an error indication,
             * with wbm_internal_error=1, to a specific ring.
             * The WBM2SW ring used to indicate these errors is
             * fixed in HW, and that ring is being used as Tx
             * completion ring. These errors are not related to
             * Tx completions, and should just be ignored
             */
            wbm_internal_error = hal_get_wbm_internal_error(
                                        soc->hal_soc,
                                        tx_comp_hal_desc);

            if (wbm_internal_error) {
                dp_err_rl("Tx comp wbm_internal_error!!");
                DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

                if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
                    buffer_src)
                    dp_handle_wbm_internal_error(
                        soc,
                        tx_comp_hal_desc,
                        hal_tx_comp_get_buffer_type(
                            tx_comp_hal_desc));

            } else {
                dp_err_rl("Tx comp wbm_internal_error false");
                DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
            }
            continue;
        }

        /* Get descriptor id */
        tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
        pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
                  DP_TX_DESC_ID_POOL_OS;

        /* Find Tx descriptor */
        tx_desc = dp_tx_desc_find(soc, pool_id,
                                  (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
                                  DP_TX_DESC_ID_PAGE_OS,
                                  (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
                                  DP_TX_DESC_ID_OFFSET_OS);

        /*
         * If the release source is FW, process the HTT status
         */
        if (qdf_unlikely(buffer_src ==
                         HAL_TX_COMP_RELEASE_SOURCE_FW)) {
            uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

            hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
                                     htt_tx_status);
            dp_tx_process_htt_completion(tx_desc,
                                         htt_tx_status, ring_id);
        } else {
            /*
             * If the fast completion mode is enabled, the
             * extended metadata from the descriptor is not copied
             */
            if (qdf_likely(tx_desc->flags &
                           DP_TX_DESC_FLAG_SIMPLE)) {
                tx_desc->peer_id =
                    hal_tx_comp_get_peer_id(tx_comp_hal_desc);
                tx_desc->tx_status =
                    hal_tx_comp_get_tx_status(tx_comp_hal_desc);
                goto add_to_pool;
            }

            /*
             * If the descriptor is already freed in vdev_detach,
             * continue to next descriptor
             */
            if (qdf_unlikely
                ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
                 !tx_desc->flags)) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_INFO,
                          "Descriptor freed in vdev_detach %d",
                          tx_desc_id);
                continue;
            }

            if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_INFO,
                          "pdev in down state %d",
                          tx_desc_id);

                dp_tx_comp_free_buf(soc, tx_desc);
                dp_tx_desc_release(tx_desc, tx_desc->pool_id);
                goto next_desc;
            }

            /* Pool id is not matching. Error */
            if (tx_desc->pool_id != pool_id) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_FATAL,
                          "Tx Comp pool id %d not matched %d",
                          pool_id, tx_desc->pool_id);

                qdf_assert_always(0);
            }

            if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
                !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_FATAL,
                          "Txdesc invalid, flgs = %x,id = %d",
                          tx_desc->flags, tx_desc_id);
                qdf_assert_always(0);
            }

            /* Collect hw completion contents */
            hal_tx_comp_desc_sync(tx_comp_hal_desc,
                                  &tx_desc->comp, 1);
add_to_pool:
            DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

            /* First ring descriptor of the cycle */
            if (!head_desc) {
                head_desc = tx_desc;
                tail_desc = tx_desc;
            }

            tail_desc->next = tx_desc;
            tx_desc->next = NULL;
            tail_desc = tx_desc;
        }
next_desc:
        num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

        /*
         * Stop reaping once the processed packet count reaches the
         * per-loop limit
         */
        count++;

        if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
            break;
    }

    dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

    /* Process the reaped descriptors */
    if (head_desc)
        dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

    if (dp_tx_comp_enable_eol_data_check(soc)) {

        if (num_processed >= quota)
            force_break = true;

        if (!force_break &&
            hal_srng_dst_peek_sync_locked(soc->hal_soc,
                                          hal_ring_hdl)) {
            DP_STATS_INC(soc, tx.hp_oos2, 1);
            if (!hif_exec_should_yield(soc->hif_handle,
                                       int_ctx->dp_intr_id))
                goto more_data;
        }
    }
    DP_TX_HIST_STATS_PER_PDEV();

    return num_processed;
}
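
/*
 * Illustrative sketch (not part of the driver): hal_tx_comp_get_desc_id()
 * returns a cookie that dp_tx_comp_handler() above splits into pool, page
 * and offset with the DP_TX_DESC_ID_*_MASK/OS constants. The same idea in
 * stand-alone C, with hypothetical field sizes (the real masks are defined
 * alongside the descriptor pool code):
 */
#if 0
#include <stdint.h>

/* Hypothetical split: 2 bits pool | 12 bits page | 10 bits offset. */
#define TOY_ID_POOL_MASK   0x00C00000u
#define TOY_ID_POOL_OS     22
#define TOY_ID_PAGE_MASK   0x003FFC00u
#define TOY_ID_PAGE_OS     10
#define TOY_ID_OFFSET_MASK 0x000003FFu
#define TOY_ID_OFFSET_OS   0

struct toy_desc_loc {
    uint8_t pool_id;
    uint16_t page_id;
    uint16_t offset;
};

static struct toy_desc_loc toy_decode_desc_id(uint32_t desc_id)
{
    struct toy_desc_loc loc;

    loc.pool_id = (desc_id & TOY_ID_POOL_MASK) >> TOY_ID_POOL_OS;
    loc.page_id = (desc_id & TOY_ID_PAGE_MASK) >> TOY_ID_PAGE_OS;
    loc.offset = (desc_id & TOY_ID_OFFSET_MASK) >> TOY_ID_OFFSET_OS;
    return loc;
}
#endif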
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                         enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                 DP_MOD_ID_TDLS);

    if (!vdev) {
        dp_err("vdev handle for id %d is NULL", vdev_id);
        return NULL;
    }

    if (tx_spec & OL_TX_SPEC_NO_FREE)
        vdev->is_tdls_frame = true;
    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

    return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif

static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
{
    struct wlan_cfg_dp_soc_ctxt *cfg;
    struct dp_soc *soc;

    soc = vdev->pdev->soc;
    if (!soc)
        return;

    cfg = soc->wlan_cfg_ctx;
    if (!cfg)
        return;

    if (vdev->opmode == wlan_op_mode_ndi)
        vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
    else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
             (vdev->subtype == wlan_op_subtype_p2p_cli) ||
             (vdev->subtype == wlan_op_subtype_p2p_go))
        vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
    else
        vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
}

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
    int pdev_id;
    /*
     * Fill HTT TCL Metadata with Vdev ID and MAC ID
     */
    HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
                                 HTT_TCL_METADATA_TYPE_VDEV_BASED);

    HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
                                    vdev->vdev_id);

    pdev_id =
        dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
                                               vdev->pdev->pdev_id);
    HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

    /*
     * Set HTT Extension Valid bit to 0 by default
     */
    HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

    dp_tx_vdev_update_search_flags(vdev);

    dp_tx_vdev_update_feature_flags(vdev);

    return QDF_STATUS_SUCCESS;
}
#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
    return false;
}
#endif

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
    struct dp_soc *soc = vdev->pdev->soc;

    /*
     * Enable both AddrY (SA based search) and AddrX (DA based search)
     * for TDLS link
     *
     * Enable AddrY (SA based search) only for non-WDS STA and
     * ProxySTA VAP (in HKv1) modes.
     *
     * In all other VAP modes, only DA based search should be
     * enabled
     */
    if (vdev->opmode == wlan_op_mode_sta &&
        vdev->tdls_link_connected)
        vdev->hal_desc_addr_search_flags =
            (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
    else if ((vdev->opmode == wlan_op_mode_sta) &&
             !dp_tx_da_search_override(vdev))
        vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
    else
        vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

    /* Set search type only when peer map v2 messaging is enabled
     * as we will have the search index (AST hash) only when v2 is
     * enabled
     */
    if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
        vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
    else
        vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
                          struct dp_vdev *vdev,
                          struct dp_tx_desc_s *tx_desc)
{
    if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
        return false;

    /*
     * If a vdev is given, match only on the descriptor's vdev;
     * if vdev is NULL, match on the descriptor's pdev instead.
     */
    return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
                  (tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 *                      to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 *        NULL: no specific Vdev is required and check all allocated TX desc
 *              on this pdev.
 *        Non-NULL: only check the allocated TX Desc associated to this Vdev.
 * @force_free:
 *        true: flush the TX desc.
 *        false: only reset the Vdev in each allocated TX desc
 *               that is associated to the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset Vdev to NULL in associated TX
 * Desc.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
                      bool force_free)
{
    uint8_t i;
    uint32_t j;
    uint32_t num_desc, page_id, offset;
    uint16_t num_desc_per_page;
    struct dp_soc *soc = pdev->soc;
    struct dp_tx_desc_s *tx_desc = NULL;
    struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

    if (!vdev && !force_free) {
        dp_err("Reset TX desc vdev, Vdev param is required!");
        return;
    }

    for (i = 0; i < MAX_TXDESC_POOLS; i++) {
        tx_desc_pool = &soc->tx_desc[i];
        if (!(tx_desc_pool->pool_size) ||
            IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
            !(tx_desc_pool->desc_pages.cacheable_pages))
            continue;

        /*
         * Add flow pool lock protection in case the pool is freed
         * because all tx_desc got recycled while handling TX
         * completion. This is not necessary for a force flush as:
         * a. a double lock would occur if dp_tx_desc_release is
         *    also trying to acquire it.
         * b. the dp interrupt has been disabled before doing the
         *    force TX desc flush in dp_pdev_deinit().
         */
        if (!force_free)
            qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

        num_desc = tx_desc_pool->pool_size;
        num_desc_per_page =
            tx_desc_pool->desc_pages.num_element_per_page;
        for (j = 0; j < num_desc; j++) {
            page_id = j / num_desc_per_page;
            offset = j % num_desc_per_page;

            if (qdf_unlikely(!(tx_desc_pool->
                               desc_pages.cacheable_pages)))
                break;

            tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

            if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
                /*
                 * Free the TX desc if force free is
                 * required, otherwise only reset the vdev
                 * in this TX desc.
                 */
                if (force_free) {
                    dp_tx_comp_free_buf(soc, tx_desc);
                    dp_tx_desc_release(tx_desc, i);
                } else {
                    tx_desc->vdev_id = DP_INVALID_VDEV_ID;
                }
            }
        }
        if (!force_free)
            qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
    }
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                      uint8_t desc_pool_id)
{
    TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

    tx_desc->vdev_id = DP_INVALID_VDEV_ID;

    TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
                      bool force_free)
{
    uint8_t i, num_pool;
    uint32_t j;
    uint32_t num_desc, page_id, offset;
    uint16_t num_desc_per_page;
    struct dp_soc *soc = pdev->soc;
    struct dp_tx_desc_s *tx_desc = NULL;
    struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

    if (!vdev && !force_free) {
        dp_err("Reset TX desc vdev, Vdev param is required!");
        return;
    }

    num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
    num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

    for (i = 0; i < num_pool; i++) {
        tx_desc_pool = &soc->tx_desc[i];
        if (!tx_desc_pool->desc_pages.cacheable_pages)
            continue;

        num_desc_per_page =
            tx_desc_pool->desc_pages.num_element_per_page;
        for (j = 0; j < num_desc; j++) {
            page_id = j / num_desc_per_page;
            offset = j % num_desc_per_page;
            tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

            if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
                if (force_free) {
                    dp_tx_comp_free_buf(soc, tx_desc);
                    dp_tx_desc_release(tx_desc, i);
                } else {
                    dp_tx_desc_reset_vdev(soc, tx_desc,
                                          i);
                }
            }
        }
    }
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
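
/*
 * Illustrative sketch (not part of the driver): both dp_tx_desc_flush()
 * variants above convert a flat descriptor index into (page, offset) with a
 * division and a modulo, because the pool is allocated as an array of
 * cacheable pages rather than one contiguous block. Stand-alone version
 * with hypothetical names:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct toy_pool {
    void **pages;               /* cacheable page array */
    uint16_t descs_per_page;    /* elements per page */
    size_t desc_size;           /* size of one descriptor */
};

static void *toy_desc_find(const struct toy_pool *pool, uint32_t index)
{
    uint32_t page_id = index / pool->descs_per_page;
    uint32_t offset = index % pool->descs_per_page;
    uint8_t *page = pool->pages[page_id];

    /* descriptor address = page base + offset within the page */
    return page + (size_t)offset * pool->desc_size;
}
#endif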
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;

    /* Reset TX desc associated to this Vdev as NULL */
    dp_tx_desc_flush(pdev, vdev, false);

    dp_tx_vdev_multipass_deinit(vdev);

    return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_pdev_init() - initialize pdev for dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    /* Initialize Flow control counters */
    qdf_atomic_init(&pdev->num_tx_outstanding);

    if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
        /* Initialize descriptors in TCL Ring */
        hal_tx_init_data_ring(soc->hal_soc,
                              soc->tcl_data_ring[pdev->pdev_id].hal_srng);
    }

    return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
    /* flush TX outstanding data per pdev */
    dp_tx_desc_flush(pdev, NULL, true);
    dp_tx_me_exit(pdev);
    return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
                                           int num_desc)
{
    uint8_t i;

    for (i = 0; i < num_pool; i++) {
        qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
        soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
    }

    return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
                                          int num_desc)
{
    return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
    uint8_t i;

    for (i = 0; i < num_pool; i++)
        qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
                                           int num_desc)
{
    uint8_t i, count;

    /* Allocate software Tx descriptor pools */
    for (i = 0; i < num_pool; i++) {
        if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Tx Desc Pool alloc %d failed %pK"),
                      i, soc);
            goto fail;
        }
    }
    return QDF_STATUS_SUCCESS;

fail:
    for (count = 0; count < i; count++)
        dp_tx_desc_pool_free(soc, count);

    return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
                                          int num_desc)
{
    uint8_t i;

    for (i = 0; i < num_pool; i++) {
        if (dp_tx_desc_pool_init(soc, i, num_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Tx Desc Pool init %d failed %pK"),
                      i, soc);
            return QDF_STATUS_E_NOMEM;
        }
    }
    return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
    uint8_t i;

    for (i = 0; i < num_pool; i++)
        dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
    uint8_t i;

    for (i = 0; i < num_pool; i++)
        dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
    dp_tx_tso_desc_pool_deinit(soc, num_pool);
    dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
    dp_tx_tso_desc_pool_free(soc, num_pool);
    dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
    uint8_t num_pool;

    num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

    dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
    dp_tx_ext_desc_pool_free(soc, num_pool);
    dp_tx_delete_static_pools(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
    uint8_t num_pool;

    num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

    dp_tx_flow_control_deinit(soc);
    dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
    dp_tx_ext_desc_pool_deinit(soc, num_pool);
    dp_tx_deinit_static_pools(soc, num_pool);
}
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
                                         uint8_t num_pool,
                                         uint16_t num_desc)
{
    if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
        dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
        return QDF_STATUS_E_FAILURE;
    }

    if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
        dp_err("TSO Num of seg Pool alloc %d failed %pK",
               num_pool, soc);
        return QDF_STATUS_E_FAILURE;
    }

    return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
                                        uint8_t num_pool,
                                        uint16_t num_desc)
{
    if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
        dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
        return QDF_STATUS_E_FAILURE;
    }

    if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
        dp_err("TSO Num of seg Pool init %d failed %pK",
               num_pool, soc);
        return QDF_STATUS_E_FAILURE;
    }

    return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
    uint8_t num_pool;
    uint32_t num_desc;
    uint32_t num_ext_desc;

    num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
    num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
    num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
              "%s Tx Desc Alloc num_pool = %d, descs = %d",
              __func__, num_pool, num_desc);

    if ((num_pool > MAX_TXDESC_POOLS) ||
        (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
        goto fail1;

    if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
        goto fail1;

    if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
        goto fail2;

    if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
        return QDF_STATUS_SUCCESS;

    if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
        goto fail3;

    return QDF_STATUS_SUCCESS;

fail3:
    dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
    dp_tx_delete_static_pools(soc, num_pool);
fail1:
    return QDF_STATUS_E_RESOURCES;
}

/**
 * dp_soc_tx_desc_sw_pools_init() - Initialize TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools:
 * 1. Regular SW TX descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO, etc.)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES on failure
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
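
/*
 * Usage sketch (illustrative only): at soc attach,
 * dp_soc_tx_desc_sw_pools_alloc() is expected to be followed by
 * dp_soc_tx_desc_sw_pools_init(), with the allocation released if init
 * fails. The wrapper below is hypothetical and is therefore compiled out.
 */
#if 0
static QDF_STATUS dp_soc_tx_setup_example(struct dp_soc *soc)
{
	if (dp_soc_tx_desc_sw_pools_alloc(soc))
		return QDF_STATUS_E_RESOURCES;

	if (dp_soc_tx_desc_sw_pools_init(soc)) {
		dp_soc_tx_desc_sw_pools_free(soc);
		return QDF_STATUS_E_RESOURCES;
	}

	return QDF_STATUS_SUCCESS;
}
#endif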

/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_FAILURE on failure
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}
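
/*
 * Usage sketch (illustrative only): when TSO descriptor attach is
 * deferred (wlan_cfg_is_tso_desc_attach_defer()), the TSO pools skipped
 * by dp_soc_tx_desc_sw_pools_alloc()/init() are set up later via
 * dp_tso_soc_attach() and torn down via dp_tso_soc_detach(). The wrapper
 * below is hypothetical and compiled out; the cast assumes the same
 * cdp_soc_t/dp_soc layering used by the two functions above.
 */
#if 0
static QDF_STATUS dp_tso_defer_example(struct dp_soc *soc)
{
	struct cdp_soc_t *txrx_soc = (struct cdp_soc_t *)soc;

	if (dp_tso_soc_attach(txrx_soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	/* ... TSO traffic runs here ... */

	return dp_tso_soc_detach(txrx_soc);
}
#endif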