dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
    (DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats - Update the outstanding tx desc count and its
 * high watermark on the pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
    int32_t tx_descs_cnt =
        qdf_atomic_read(&pdev->num_tx_outstanding);

    if (pdev->tx_descs_max < tx_descs_cnt)
        pdev->tx_descs_max = tx_descs_cnt;
    qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
                               pdev->tx_descs_max);
}
#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
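
/*
 * Note: tx_descs_max above tracks the high watermark of in-flight tx
 * descriptors; qdf_mem_tx_desc_cnt_update() publishes both the current
 * count and the watermark, presumably for the sysfs mem-stats interface
 * implied by the CONFIG_WLAN_SYSFS_MEM_STATS gate.
 */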
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
    qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                 QDF_DMA_TO_DEVICE,
                                 desc->nbuf->len);
    desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
    if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
        qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                     QDF_DMA_TO_DEVICE,
                                     desc->nbuf->len);
}
#else
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
    qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                 QDF_DMA_TO_DEVICE, desc->nbuf->len);
}
#endif
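
/*
 * When enhanced packet capture / per-packet info is enabled, the capture
 * path may unmap the nbuf early via dp_tx_enh_unmap() and record that
 * fact with DP_TX_DESC_FLAG_UNMAP_DONE; dp_tx_unmap() then skips the
 * unmap on the regular completion path so the same buffer is never
 * unmapped twice.
 */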
#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_outstanding) >=
        soc->num_tx_allowed) {
        dp_tx_info("queued packets are more than max tx, drop the frame");
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return true;
    }

    if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
        pdev->num_tx_allowed) {
        dp_tx_info("queued packets are more than max tx, drop the frame");
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
        return true;
    }
    return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_exception) >=
        soc->num_msdu_exception_desc) {
        dp_info("exc packets are more than max drop the exc pkt");
        DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
        return true;
    }
    return false;
}
/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_inc(&pdev->num_tx_outstanding);
    qdf_atomic_inc(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_dec(&pdev->num_tx_outstanding);
    qdf_atomic_dec(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#else /* QCA_TX_LIMIT_CHECK */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    qdf_atomic_inc(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    qdf_atomic_dec(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_TX_LIMIT_CHECK */
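
/*
 * Usage sketch (illustrative, not a new code path): the limit check and
 * the outstanding counters above pair up across a descriptor's lifetime:
 *
 *     if (dp_tx_limit_check(vdev))
 *         return NULL;                      // drop: limit reached
 *     tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *     dp_tx_outstanding_inc(pdev);          // allocation path
 *     ...
 *     dp_tx_outstanding_dec(pdev);          // in dp_tx_desc_release()
 *
 * This mirrors dp_tx_prepare_desc_single() and dp_tx_desc_release()
 * later in this file.
 */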
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
    enum dp_tx_event_type type;

    if (flags & DP_TX_DESC_FLAG_FLUSH)
        type = DP_TX_DESC_FLUSH;
    else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
        type = DP_TX_COMP_UNMAP_ERR;
    else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
        type = DP_TX_COMP_UNMAP;
    else
        type = DP_TX_DESC_UNMAP;

    return type;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
                       qdf_nbuf_t skb, uint32_t sw_cookie,
                       enum dp_tx_event_type type)
{
    struct dp_tx_desc_event *entry;
    uint32_t idx;

    if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
        return;

    switch (type) {
    case DP_TX_COMP_UNMAP:
    case DP_TX_COMP_UNMAP_ERR:
    case DP_TX_COMP_MSDU_EXT:
        idx = dp_history_get_next_index(&soc->tx_comp_history->index,
                                        DP_TX_COMP_HISTORY_SIZE);
        entry = &soc->tx_comp_history->entry[idx];
        break;
    case DP_TX_DESC_MAP:
    case DP_TX_DESC_UNMAP:
    case DP_TX_DESC_COOKIE:
    case DP_TX_DESC_FLUSH:
        idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
                                        DP_TX_TCL_HISTORY_SIZE);
        entry = &soc->tx_tcl_history->entry[idx];
        break;
    default:
        dp_info_rl("Invalid dp_tx_event_type: %d", type);
        return;
    }

    entry->skb = skb;
    entry->paddr = paddr;
    entry->sw_cookie = sw_cookie;
    entry->type = type;
    entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
                          struct qdf_tso_seg_elem_t *tso_seg,
                          qdf_nbuf_t skb, uint32_t sw_cookie,
                          enum dp_tx_event_type type)
{
    int i;

    for (i = 1; i < tso_seg->seg.num_frags; i++) {
        dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
                               skb, sw_cookie, type);
    }
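
    /*
     * For the tail segment of the list, fragment 0 is also recorded,
     * with a sentinel cookie (0xFFFFFFFF) so the final entry stands out
     * in the history ring.
     */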
    if (!tso_seg->next)
        dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
                               skb, 0xFFFFFFFF, type);
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
                      qdf_nbuf_t skb, uint32_t sw_cookie,
                      enum dp_tx_event_type type)
{
    struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
    uint32_t num_segs = tso_info.num_segs;

    while (num_segs) {
        dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
        curr_seg = curr_seg->next;
        num_segs--;
    }
}
#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
    return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
                       qdf_nbuf_t skb, uint32_t sw_cookie,
                       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
                          struct qdf_tso_seg_elem_t *tso_seg,
                          qdf_nbuf_t skb, uint32_t sw_cookie,
                          enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
                      qdf_nbuf_t skb, uint32_t sw_cookie,
                      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
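
/*
 * The TCL (enqueue) and completion history rings above record a trail of
 * descriptor map/unmap events for debugging: a writer claims a slot via
 * dp_history_get_next_index() and fills it in place, keeping recording
 * cheap on the hot path.
 */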
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    TSO_DEBUG("%s: Unmap the tso segment", __func__);
    if (qdf_unlikely(!seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else {
        bool is_last_seg;

        /* no tso segment left to do dma unmap */
        if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
            return;

        is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
                      true : false;
        qdf_nbuf_unmap_tso_segment(soc->osdev,
                                   seg_desc, is_last_seg);
        num_seg_desc->num_seg.tso_cmn_num_seg--;
    }
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 * back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(!tx_desc->tso_desc)) {
        dp_tx_err("TSO desc is NULL!");
        qdf_assert(0);
    } else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
        dp_tx_err("TSO num desc is NULL!");
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

        /* Add the tso num segment into the free list */
        if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                                tx_desc->tso_num_desc);
            tx_desc->tso_num_desc = NULL;
            DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
        }

        /* Add the tso segment into the free list */
        dp_tx_tso_desc_free(soc,
                            tx_desc->pool_id, tx_desc->tso_desc);
        tx_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: void
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    dp_tx_outstanding_dec(pdev);

    if (tx_desc->frm_type == dp_tx_frm_tso)
        dp_tx_tso_desc_release(soc, tx_desc);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
        dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&soc->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
        tx_desc->buffer_src)
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
                                                     soc->hal_soc);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
                tx_desc->id, comp_status,
                qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 * 1) Mesh Mode
 * 2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                          struct dp_tx_msdu_info_s *msdu_info)
{
    uint32_t *meta_data = msdu_info->meta_data;
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *)meta_data;

    uint8_t htt_desc_size;

    /* Size rounded up to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
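
    /*
     * e.g. a 30-byte descriptor rounds up as (30 + 7) & ~0x7 = 32, so
     * the pushed pre-header always keeps 8-byte granularity, matching
     * the HW alignment requirement described in
     * dp_tx_prepare_desc_single().
     */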
    if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
        HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
                                                       meta_data[0])) {
        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
                         htt_desc_size_aligned)) {
            nbuf = qdf_nbuf_realloc_headroom(nbuf,
                                             htt_desc_size_aligned);
            if (!nbuf) {
                /*
                 * qdf_nbuf_realloc_headroom won't do skb_clone
                 * as skb_realloc_headroom does. so, no free is
                 * needed here.
                 */
                DP_STATS_INC(vdev,
                             tx_i.dropped.headroom_insufficient,
                             1);
                qdf_print(" %s[%d] skb_realloc_headroom failed",
                          __func__, __LINE__);
                return 0;
            }
        }
        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        if (!hdr) {
            dp_tx_err("Error in filling HTT metadata");
            return 0;
        }
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *)&tso_seg->tso_flags;
    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
                                    tso_seg->tso_flags.ip_len);

    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
                          (tso_seg->tso_frags[num_frag].length));

        qdf_dmaaddr_to_32s(
            tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                                   tso_seg->tso_frags[num_frag].length);
    }

    return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    return;
}
#endif
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 * allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 * allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_num_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_num_seg;

    while (free_num_seg) {
        next_num_seg = free_num_seg->next;
        dp_tso_num_seg_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_num_seg);
        free_num_seg = next_num_seg;
    }
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments,
 * doing a dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    struct qdf_tso_seg_elem_t *next_seg;

    if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "TSO number seg desc is NULL!");
        return;
    }

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
        free_seg = next_seg;
    }
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx: Retrieve the tso packet id
 * @pdev - pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    uint32_t stats_idx;

    stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
                 % CDP_MAX_TSO_PACKETS);
    return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    return 0;
}
#endif /* FEATURE_TSO_STATS */
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 * free the tso segments descriptor and
 * tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
                                          struct dp_tx_msdu_info_s *msdu_info,
                                          bool tso_seg_unmap)
{
    struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
    struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
    struct qdf_tso_num_seg_elem_t *tso_num_desc =
        tso_info->tso_num_seg_list;

    /* do dma unmap for each segment */
    if (tso_seg_unmap)
        dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

    /* free all tso num seg descriptors (the list typically holds just one) */
    dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

    /* free all tso segment descriptors */
    dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct dp_pdev *pdev = vdev->pdev;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
            soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            dp_err_rl("Failed to alloc tso seg desc");
            DP_STATS_INC_PKT(vdev->pdev,
                             tso_stats.tso_no_mem_dropped, 1,
                             qdf_nbuf_len(msdu));
            dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
                                       msdu_info->tx_queue.desc_pool_id);
    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
                 __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
              msdu_info->num_seg);

    if (!(msdu_info->num_seg)) {
        /*
         * Free allocated TSO seg desc and number seg desc,
         * do unmap for segments if dma map has done.
         */
        DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
        return QDF_STATUS_E_INVAL;
    }

    dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
                          msdu, 0, DP_TX_DESC_MAP);

    tso_info->curr_seg = tso_info->tso_seg_list;

    tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
    dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
                         msdu, msdu_info->num_seg);
    dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
                                tso_info->msdu_stats_idx);
    dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif
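
/*
 * Note on the cleanup flag above: dp_tx_free_remaining_tso_desc() is
 * called with tso_seg_unmap == false on the allocation failure paths,
 * since qdf_nbuf_get_tso_info() has not run yet and nothing is DMA
 * mapped; once qdf_nbuf_get_tso_info() has been called, failures clean
 * up with tso_seg_unmap == true so mapped segments are unmapped.
 */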
QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
                        (DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
                         sizeof(struct htt_tx_msdu_desc_ext2_t)));
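
/*
 * The assert above guards the copy in dp_tx_prepare_htt_metadata(): the
 * meta_data[] scratch array (DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit
 * words) must be large enough to hold a full
 * struct htt_tx_msdu_desc_ext2_t.
 */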
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    if (msdu_info->exception_fw &&
        qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                     &msdu_info->meta_data[0],
                     sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&soc->num_tx_exception);
        msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;

        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                                       seg_info->frags[i].paddr_lo,
                                       seg_info->frags[i].paddr_hi,
                                       seg_info->frags[i].len);
        }
        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                                   &cached_ext_desc[0]);
        break;

    default:
        break;
    }
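
    /*
     * The descriptor is composed in the cached stack buffer above and
     * then written out to the extension descriptor's memory in one shot
     * by hal_tx_ext_desc_sync() below.
     */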
    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                       cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
                         msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
    QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
    QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
    DPTRACE(qdf_dp_trace_ptr(skb,
                             QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             msdu_id, vdev_id, 0));

    qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

    DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                                  QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
                                  msdu_id, QDF_TX));
}
#endif
#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 * exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Returns: 1 if the packet is marked as exception,
 * 0 if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return 0;
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info carrying the metadata to the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 * NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
        struct dp_tx_msdu_info_s *msdu_info,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    uint8_t align_pad;
    uint8_t is_exception = 0;
    uint8_t htt_hdr_size;
    struct dp_tx_desc_s *tx_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

    if (qdf_unlikely(!tx_desc)) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = dp_tx_frm_std;
    tx_desc->tx_encap_type = ((tx_exc_metadata &&
        (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
        tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
    tx_desc->vdev_id = vdev->vdev_id;
    tx_desc->pdev = pdev;
    tx_desc->msdu_ext_desc = NULL;
    tx_desc->pkt_offset = 0;
    tx_desc->length = qdf_nbuf_headlen(nbuf);

    dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

    if (qdf_unlikely(vdev->multipass_en)) {
        if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
            goto failure;
    }

    /* Packets marked by upper layer (OS-IF) to be sent to FW */
    if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
        is_exception = 1;

    /*
     * For special modes (vdev_type == ocb or mesh), data frames should be
     * transmitted using varying transmit parameters (tx spec) which include
     * transmit rate, power, priority, channel, channel bandwidth, nss etc.
     * These are filled in HTT MSDU descriptor and sent in frame pre-header.
     * These frames are sent as exception packets to firmware.
     *
     * HW requirement is that metadata should always point to a
     * 8-byte aligned address. So we add alignment pad to start of buffer.
     * HTT Metadata should be ensured to be multiple of 8-bytes,
     * to get 8-byte aligned start address along with align_pad added
     *
     *  |-----------------------------|
     *  |                             |
     *  |-----------------------------| <-----Buffer Pointer Address given
     *  |                             |  ^    in HW descriptor (aligned)
     *  |        HTT Metadata         |  |
     *  |                             |  |
     *  |                             |  | Packet Offset given in descriptor
     *  |                             |  |
     *  |-----------------------------|  |
     *  |        Alignment Pad        |  v
     *  |-----------------------------| <----- Actual buffer start address
     *  |         SKB Data            |        (Unaligned)
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |-----------------------------|
     */
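
    /*
     * Worked example (illustrative): if qdf_nbuf_data(nbuf) ends in 0x4,
     * align_pad = 0x4 & 0x7 = 4, so pushing 4 bytes makes the head
     * 8-byte aligned; the HTT metadata pushed afterwards is itself a
     * multiple of 8 bytes, so the final buffer pointer stays aligned and
     * pkt_offset = align_pad + htt_hdr_size locates the real payload.
     */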
    if (qdf_unlikely((msdu_info->exception_fw)) ||
        (vdev->opmode == wlan_op_mode_ocb) ||
        (tx_exc_metadata &&
         tx_exc_metadata->is_tx_sniffer)) {
        align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
            DP_STATS_INC(vdev,
                         tx_i.dropped.headroom_insufficient, 1);
            goto failure;
        }

        if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
            dp_tx_err("qdf_nbuf_push_head failed");
            goto failure;
        }

        htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
                                                  msdu_info);
        if (htt_hdr_size == 0)
            goto failure;

        tx_desc->length = qdf_nbuf_headlen(nbuf);
        tx_desc->pkt_offset = align_pad + htt_hdr_size;
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        is_exception = 1;

        tx_desc->length -= tx_desc->pkt_offset;
    }

#if !TQM_BYPASS_WAR
    if (is_exception || tx_exc_metadata)
#endif
    {
        /* Temporary WAR due to TQM VP issues */
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        qdf_atomic_inc(&soc->num_tx_exception);
    }

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 * frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 * NULL on failure
 */
  991. static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
  992. qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
  993. uint8_t desc_pool_id)
  994. {
  995. struct dp_tx_desc_s *tx_desc;
  996. struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
  997. struct dp_pdev *pdev = vdev->pdev;
  998. struct dp_soc *soc = pdev->soc;
  999. if (dp_tx_limit_check(vdev))
  1000. return NULL;
  1001. /* Allocate software Tx descriptor */
  1002. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1003. if (!tx_desc) {
  1004. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1005. return NULL;
  1006. }
  1007. dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
  1008. nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
  1009. dp_tx_outstanding_inc(pdev);
  1010. /* Initialize the SW tx descriptor */
  1011. tx_desc->nbuf = nbuf;
  1012. tx_desc->frm_type = msdu_info->frm_type;
  1013. tx_desc->tx_encap_type = vdev->tx_encap_type;
  1014. tx_desc->vdev_id = vdev->vdev_id;
  1015. tx_desc->pdev = pdev;
  1016. tx_desc->pkt_offset = 0;
  1017. tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
  1018. tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
  1019. dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
  1020. /* Handle scattered frames - TSO/SG/ME */
  1021. /* Allocate and prepare an extension descriptor for scattered frames */
  1022. msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
  1023. if (!msdu_ext_desc) {
  1024. dp_tx_info("Tx Extension Descriptor Alloc Fail");
  1025. goto failure;
  1026. }
  1027. #if TQM_BYPASS_WAR
  1028. /* Temporary WAR due to TQM VP issues */
  1029. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1030. qdf_atomic_inc(&soc->num_tx_exception);
  1031. #endif
  1032. if (qdf_unlikely(msdu_info->exception_fw))
  1033. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1034. tx_desc->msdu_ext_desc = msdu_ext_desc;
  1035. tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
  1036. tx_desc->dma_addr = msdu_ext_desc->paddr;
  1037. if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
  1038. tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
  1039. else
  1040. tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
  1041. return tx_desc;
  1042. failure:
  1043. dp_tx_desc_release(tx_desc, desc_pool_id);
  1044. return NULL;
  1045. }
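/*
 * Note: for these scattered frames the descriptor does not point at the
 * nbuf data; tx_desc->dma_addr carries the MSDU extension descriptor
 * (which in turn holds the per-fragment addresses), so tx_desc->length
 * here is the extension descriptor size, not the frame length.
 */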
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* Continue only if frames are of DATA type */
	if (!DP_FRAME_IS_DATA(qos_wh)) {
		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
		dp_tx_debug("Pkt received is not of data type");
		goto error;
	}

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (vdev->raw_mode_war &&
	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
	     curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
		/*
		 * Number of nbuf's must not exceed the size of the frags
		 * array in seg_info.
		 */
		if (i >= DP_TX_MAX_NUM_FRAGS) {
			dp_err_rl("nbuf cnt exceeds the max number of segs");
			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
			goto error;
		}
		if (QDF_STATUS_SUCCESS !=
			qdf_nbuf_map_nbytes_single(vdev->osdev,
						   curr_nbuf,
						   QDF_DMA_TO_DEVICE,
						   curr_nbuf->len)) {
			dp_tx_err("%s dma map error", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			goto error;
		}
		/* Update the count of mapped nbuf's */
		mapped_buf_num++;

		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;

		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
						     QDF_DMA_TO_DEVICE,
						     curr_nbuf->len);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;
}
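/*
 * A short sketch of the error unwind above: only the first
 * mapped_buf_num buffers were DMA-mapped, so the loop unmaps exactly
 * those and then frees every nbuf in the chain, mapped or not. E.g.
 * with a 4-buffer chain failing on the 3rd map, buffers 0-1 are
 * unmapped and all 4 are freed.
 */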
/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
				    qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur_nbuf = nbuf;

	do {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
					     QDF_DMA_TO_DEVICE,
					     cur_nbuf->len);
		cur_nbuf = qdf_nbuf_next(cur_nbuf);
	} while (cur_nbuf);
}
#ifdef VDEV_PEER_PROTOCOL_COUNT
void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
					       qdf_nbuf_t nbuf)
{
	qdf_nbuf_t nbuf_local;
	struct dp_vdev *vdev_local = vdev_hdl;

	do {
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
			break;
		nbuf_local = nbuf;
		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
			 htt_cmn_pkt_type_raw))
			break;
		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
			break;
		else if (qdf_nbuf_is_tso((nbuf_local)))
			break;

		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
						       (nbuf_local),
						       NULL, 1, 0);
	} while (0);
}
#endif
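/*
 * Note: the do { ... } while (0) above is only a structured goto; each
 * "break" skips the protocol-count update for frames that cannot be
 * parsed as plain ethernet (raw encap, nonlinear, TSO) without nesting
 * several if blocks.
 */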
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @nbuf: packet being transmitted
 *
 * Returns: none
 */
void dp_tx_update_stats(struct dp_soc *soc,
			qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
}

int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid)
{
	struct dp_swlm *swlm = &soc->swlm;
	union swlm_data swlm_query_data;
	struct dp_swlm_tcl_data tcl_data;
	QDF_STATUS status;
	int ret;

	if (qdf_unlikely(!swlm->is_enabled))
		return 0;

	tcl_data.nbuf = tx_desc->nbuf;
	tcl_data.tid = tid;
	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
	swlm_query_data.tcl_data = &tcl_data;

	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_swlm_tcl_reset_session_data(soc);
		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
		return 0;
	}

	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
	if (ret) {
		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
	} else {
		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
	}

	return ret;
}

void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	if (coalesce)
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
	else
		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}
#endif
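/*
 * A short usage sketch (hypothetical caller, assuming SWLM is enabled):
 * the decision from dp_tx_attempt_coalescing() is fed straight into
 * dp_tx_ring_access_end(). A non-zero result reaps completions but
 * defers the TCL head-pointer write, so several descriptors can be
 * flushed to HW with a single register access:
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
 *	... enqueue the descriptor ...
 *	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
 */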
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Wrapper for HAL ring access end for data transmission for
 * FEATURE_RUNTIME_PM
 *
 * Returns: none
 */
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	int ret;

	ret = hif_pm_runtime_get(soc->hif_handle,
				 RTPM_ID_DW_TX_HW_ENQUEUE, true);
	switch (ret) {
	case 0:
		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		hif_pm_runtime_put(soc->hif_handle,
				   RTPM_ID_DW_TX_HW_ENQUEUE);
		break;
	/*
	 * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS,
	 * take the dp runtime refcount using dp_runtime_get, then
	 * check the link state: if up, write the TX ring HP, else just
	 * set the flush event.
	 * In dp_runtime_resume, wait until the dp runtime refcount
	 * becomes zero or times out, then flush pending tx.
	 */
	case -EBUSY:
	case -EINPROGRESS:
		dp_runtime_get(soc);
		if (hif_pm_get_link_state(soc->hif_handle) ==
		    HIF_PM_LINK_STATE_UP) {
			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		} else {
			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		}
		dp_runtime_put(soc);
		break;
	default:
		dp_runtime_get(soc);
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
		dp_runtime_put(soc);
	}
}
#endif
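/*
 * Note on the default branch above: any hif_pm_runtime_get() error other
 * than -EBUSY/-EINPROGRESS also avoids touching the head pointer; the
 * ring is only reaped and a flush event is recorded so the deferred
 * head-pointer write happens once the runtime-PM state settles.
 */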
/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool (true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	if (qdf_likely(vdev->skip_sw_tid_classification)) {
		/*
		 * In case of mesh packets or hlos tid override enabled,
		 * don't do any classification
		 */
		if (qdf_unlikely(vdev->skip_sw_tid_classification
					& DP_TX_SKIP_CCE_CLASSIFY))
			return false;
	}

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(qdf_ether_header_t));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
		/* For encrypted packets don't do any classification */
		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
			return false;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {

				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
					 && (ether_type ==
					 qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {

				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_VLAN)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
						sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;

	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_arp_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
		|| qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
		|| (qdf_nbuf_is_ipv4_pkt(nbuf)
			&& qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
		|| (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone))
			qdf_nbuf_free(nbuf_clone);
		return true;
	}

	if (qdf_unlikely(nbuf_clone))
		qdf_nbuf_free(nbuf_clone);

	return false;
}
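/*
 * Why the clone above: the EAPOL/ARP/WAPI/TDLS/DHCP parsers expect the
 * ethertype right after the MAC header, so for LLC/SNAP (and VLAN)
 * encapsulated frames a clone is pulled to that layout and classified
 * instead of the original nbuf, which must stay untouched for
 * transmission. The clone is freed on every exit path.
 */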
/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the extracted TID is stored
 *
 * Extract the DSCP or PCP information from the frame and map it into a
 * TID value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = (uint8_t *)(eh->ether_dhost);
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *) nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
					+ sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *) eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/*
		 * TODO: use flowlabel
		 * IGMP/MLD cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *) L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}

	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
}
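/*
 * A worked example of the DSCP mapping above (values hypothetical,
 * assuming the usual shift of 2 and 6-bit mask implied by the ECN
 * comment): an IPv4 TOS byte of 0xB8 (DSCP EF) yields
 * (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK = 46, and the TID comes
 * from pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */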
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the TID is stored
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	/*
	 * The skip_sw_tid_classification flag is set in the below cases:
	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
	 * 2. hlos_tid_override enabled for vdev
	 * 3. mesh mode enabled for vdev
	 */
	if (qdf_likely(vdev->skip_sw_tid_classification)) {
		/* Update tid in msdu_info from skb priority */
		if (qdf_unlikely(vdev->skip_sw_tid_classification
			& DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
			msdu_info->tid = qdf_nbuf_get_priority(nbuf);
			return;
		}
		return;
	}

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_soc *soc,
				    struct dp_vdev *vdev,
				    struct dp_tx_desc_s *tx_desc)
{
	if (vdev) {
		if (vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			vdev->is_tdls_frame = false;
		}
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
					 struct dp_tx_desc_s *tx_desc)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
						     DP_MOD_ID_TDLS);

	if (qdf_unlikely(!vdev)) {
		dp_err_rl("vdev is null!");
		goto error;
	}

	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
		return;
	} else {
		dp_err_rl("callback func is null");
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
error:
	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_tx_msdu_single_map() - do nbuf map
 * @vdev: DP vdev handle
 * @tx_desc: DP TX descriptor pointer
 * @nbuf: skb pointer
 *
 * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
 * operation done in other components.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
		return qdf_nbuf_map_nbytes_single(vdev->osdev,
						  nbuf,
						  QDF_DMA_TO_DEVICE,
						  nbuf->len);
	else
		return qdf_nbuf_map_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE);
}
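/*
 * Note: TDLS frames are mapped with qdf_nbuf_map_single() purely so the
 * map pairs with the qdf_nbuf_unmap_single() issued on the completion
 * path (see dp_non_std_tx_comp_free_buff() above); all other frames use
 * the nbytes variant to map only nbuf->len bytes.
 */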
#else
static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
						struct dp_tx_desc_s *tx_desc)
{
}

static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	return qdf_nbuf_map_nbytes_single(vdev->osdev,
					  nbuf,
					  QDF_DMA_TO_DEVICE,
					  nbuf->len);
}
#endif
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->mesh_vdev))
		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
}

/**
 * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = NULL;

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
	} else {
		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
					     DP_MOD_ID_MESH);
		if (vdev && vdev->osif_tx_free_ext)
			vdev->osif_tx_free_ext((nbuf));
		else
			qdf_nbuf_free(nbuf);

		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
	}
}
#else
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_frame_is_drop() - checks if the packet is loopback
 * @vdev: DP vdev handle
 * @srcmac: source MAC address
 * @dstmac: destination MAC address
 *
 * Return: 1 if frame needs to be dropped else 0
 */
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
{
	struct dp_pdev *pdev = NULL;
	struct dp_ast_entry *src_ast_entry = NULL;
	struct dp_ast_entry *dst_ast_entry = NULL;
	struct dp_soc *soc = NULL;

	qdf_assert(vdev);
	pdev = vdev->pdev;
	qdf_assert(pdev);
	soc = pdev->soc;

	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, dstmac, vdev->pdev->pdev_id);
	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, srcmac, vdev->pdev->pdev_id);
	if (dst_ast_entry && src_ast_entry) {
		if (dst_ast_entry->peer_id ==
			src_ast_entry->peer_id)
			return 1;
	}

	return 0;
}
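/*
 * Rationale for the check above: if the source and destination MAC both
 * resolve (via the per-pdev AST hash) to the same peer_id, the frame
 * would loop straight back to its sender, so the caller drops it.
 */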
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (TID, fw metadata, Tx queue to be used, ...)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					     HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
						peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw)
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);

	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
					 !pdev->enhanced_stats_en);

	dp_tx_update_mesh_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
			       tx_desc->id, DP_TX_DESC_MAP);
	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
					     htt_tcl_metadata,
					     tx_exc_metadata, msdu_info);

	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
			     tx_desc, tx_q->ring_id);
		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
				       tx_desc->id, DP_TX_DESC_UNMAP);
		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
					     QDF_DMA_TO_DEVICE,
					     nbuf->len);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tid_stats = &pdev->stats.tid_stats.
		    tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}
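/*
 * A short sketch of the TCL metadata selection above: DP_INVALID_PEER
 * marks host-inspected frames, any other valid peer_id switches the
 * metadata to peer-based addressing (the NAWDS case), and
 * HTT_INVALID_PEER falls back to the per-vdev default metadata.
 */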
/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t nbuf = desc->nbuf;
	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(soc, desc);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
					       desc->id, DP_TX_COMP_MSDU_EXT);
			dp_tx_tso_seg_history_add(soc, desc->tso_desc,
						  desc->nbuf, desc->id, type);
			/* unmap each TSO seg before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
						desc->tso_num_desc);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	/* If it's an ME frame, don't unmap the cloned nbufs */
	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
		goto nbuf_free;

	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
	dp_tx_unmap(soc, desc);

	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
		return dp_mesh_tx_comp_free_buff(soc, desc);

nbuf_free:
	qdf_nbuf_free(nbuf);
}
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				prep_desc_fail++;
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
				if (prep_desc_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if descriptor
					 * preparation failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For ME, there are as many segments as
				 * there are clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				}
				i++;
				continue;
			}

			if (msdu_info->frm_type == dp_tx_frm_tso) {
				dp_tx_tso_seg_history_add(
						soc,
						msdu_info->u.tso_info.curr_seg,
						nbuf, 0, DP_TX_DESC_UNMAP);
				dp_tx_tso_unmap_segment(soc,
							msdu_info->u.tso_info.
							curr_seg,
							msdu_info->u.tso_info.
							tso_num_seg_list);

				if (msdu_info->u.tso_info.curr_seg->next) {
					msdu_info->u.tso_info.curr_seg =
						msdu_info->u.tso_info.curr_seg->next;
					i++;
					continue;
				}
			}

			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
				 */
			}
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
						     htt_tcl_metadata,
						     NULL, msdu_info);

		if (status != QDF_STATUS_SUCCESS) {
			dp_info("Tx_hw_enqueue Fail tx_desc %pK queue %d",
				tx_desc, tx_q->ring_id);

			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;

			if (msdu_info->frm_type == dp_tx_frm_me) {
				hw_enq_fail++;
				if (hw_enq_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if enqueue
					 * failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For ME, there are as many segments as
				 * there are clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				} else
					break;
				i++;
				continue;
			}

			/*
			 * For TSO frames, the nbuf users increment done for
			 * the current segment has to be reverted, since the
			 * hw enqueue for this segment failed
			 */
			if (msdu_info->frm_type == dp_tx_frm_tso &&
			    msdu_info->u.tso_info.curr_seg) {
				/*
				 * unmap and free current,
				 * retransmit remaining segments
				 */
				dp_tx_comp_free_buf(soc, tx_desc);
				i++;
				continue;
			}

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, the following 2 blocks of code (for TSO
		 * and SG) can be combined into 1
		 */

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			} else
				break;
		}
		i++;
	}

	nbuf = NULL;

done:
	return nbuf;
}
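/*
 * Note on the TSO reference counting above: one extra nbuf user is taken
 * per additional segment, so with e.g. a 3-segment TSO nbuf the user
 * count reaches 3 and the skb is only freed once all three segments see
 * their tx completion; a failed hw enqueue drops the extra reference via
 * dp_tx_comp_free_buf().
 */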
/**
 * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
 *                      for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags, i;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS !=
		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE,
					   qdf_nbuf_headlen(nbuf))) {
		dp_tx_err("dma map error");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *) nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			dp_tx_err("frag dma map error");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			goto map_err;
		}

		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t) paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
map_err:
	/* restore paddr into nbuf before calling unmap */
	qdf_nbuf_mapped_paddr_set(nbuf,
				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
				  ((uint64_t)
				  seg_info->frags[0].paddr_hi) << 32));
	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
				     QDF_DMA_TO_DEVICE,
				     seg_info->frags[0].len);
	for (i = 1; i <= cur_frag; i++) {
		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
				   (seg_info->frags[i].paddr_lo | ((uint64_t)
				   seg_info->frags[i].paddr_hi) << 32),
				   seg_info->frags[i].len,
				   QDF_DMA_TO_DEVICE);
	}
	qdf_nbuf_free(nbuf);
	return NULL;
}
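/*
 * A worked example of the paddr_lo/paddr_hi split used above (value
 * hypothetical): a 64-bit DMA address of 0x1_2345_6780 is stored as
 * paddr_lo = 0x23456780 and paddr_hi = 0x1, and the map_err path
 * reassembles it as paddr_lo | ((uint64_t)paddr_hi << 32) before the
 * unmap.
 */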
/**
 * dp_tx_add_tx_sniffer_meta_data() - Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Return: None
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
				(msdu_info->meta_data[6], ppdu_cookie);

	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		dp_tx_err("qdf_nbuf_pull_head failed");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
		   " tid %d to_fw %d",
		   msdu_info->meta_data[0],
		   msdu_info->meta_data[1],
		   msdu_info->meta_data[2],
		   msdu_info->meta_data[3],
		   msdu_info->meta_data[4],
		   msdu_info->meta_data[5],
		   msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
#endif
/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
			    HTT_INVALID_TID);
	bool invalid_encap_type =
			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
			       tx_exc->ppdu_cookie == 0);

	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
	    invalid_cookie) {
		return false;
	}

	return true;
}
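/*
 * A minimal sketch of metadata that passes the checks above (caller and
 * field values hypothetical): each field may be left at its "invalid"
 * sentinel, but a sniffer frame must carry a non-zero cookie.
 *
 *	struct cdp_tx_exception_metadata meta = {0};
 *
 *	meta.tid = HTT_INVALID_TID;
 *	meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	meta.sec_type = CDP_INVALID_SEC_TYPE;
 *	meta.is_tx_sniffer = 1;
 *	meta.ppdu_cookie = 0xabcd;	// must be non-zero for sniffer
 *	// dp_check_exc_metadata(&meta) now returns true
 */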
#ifdef ATH_SUPPORT_IQUE
/**
 * dp_tx_mcast_enhance() - Multicast enhancement on TX
 * @vdev: vdev handle
 * @nbuf: skb
 *
 * Return: true on success,
 *         false on failure
 */
static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh;

	/* Mcast to Ucast Conversion */
	if (qdf_likely(!vdev->mcast_enhancement_en))
		return true;

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
		dp_verbose_debug("Mcast frm for ME %pK", vdev);
		qdf_nbuf_set_next(nbuf, NULL);

		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
				 qdf_nbuf_len(nbuf));
		if (dp_tx_prepare_send_me(vdev, nbuf) ==
				QDF_STATUS_SUCCESS) {
			return false;
		}

		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return false;
			}
		}
	}

	return true;
}
#else
static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return true;
}
#endif
/**
 * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
 * @nbuf: qdf_nbuf_t
 * @vdev: struct dp_vdev *
 *
 * Allow a packet for processing only if it is destined to a peer client
 * connected to the same vap. Drop the packet if the client is connected
 * to a different vap.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
{
	struct dp_ast_entry *dst_ast_entry = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
		return QDF_STATUS_SUCCESS;

	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
							eh->ether_dhost,
							vdev->vdev_id);

	/* If there is no ast entry, return failure */
	if (qdf_unlikely(!dst_ast_entry)) {
		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
		return QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	qdf_ether_header_t *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_EXCEPTION);

	if (qdf_unlikely(!vdev))
		goto fail;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	if (!tx_exc_metadata)
		goto fail;

	msdu_info.tid = tx_exc_metadata->tid;
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
			 QDF_MAC_ADDR_REF(nbuf->data));

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		dp_tx_err("Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		dp_tx_err("Mesh mode is not supported in exception path");
		goto fail;
	}

	/*
	 * Classify the frame and call the corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into the MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			goto fail;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			goto fail;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}
	if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
				 qdf_nbuf_len(nbuf));
		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
					       tx_exc_metadata->ppdu_cookie);
	}
	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * Check exception descriptors
	 */
	if (dp_tx_exception_limit_check(vdev))
		goto fail;

	/*  Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare a direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
				      tx_exc_metadata->peer_id,
				      tx_exc_metadata);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
/**
 * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
 * in exception path in special case to avoid regular exception path check.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
				   uint8_t vdev_id, qdf_nbuf_t nbuf,
				   struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_EXCEPTION);

	if (qdf_unlikely(!vdev))
		goto fail;

	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
			== QDF_STATUS_E_FAILURE)) {
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		goto fail;
	}

	/* Unref count as it will again be taken inside dp_tx_exception */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);

	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);

fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (!nbuf_mesh) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_unshare failed");
		return nbuf;
	}

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"vdev is NULL for vdev_id %d", vdev_id);
		return nbuf;
	}

	nbuf = nbuf_mesh;
	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
			(mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);

	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
			!no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"qdf_nbuf_clone failed");
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
	if ((!nbuf) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
	return nbuf;
}
#else
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	return dp_tx_send(soc, vdev_id, nbuf);
}
#endif
/**
 * dp_tx_nawds_handler() - NAWDS handler
 *
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @msdu_info: msdu_info required to create HTT metadata
 * @nbuf: skb
 *
 * This API transmits multicast frames with the peer id
 * of each NAWDS-enabled peer.
 *
 * Return: none
 */
static inline
void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	uint16_t sa_peer_id = DP_INVALID_PEER;
	struct dp_ast_entry *ast_entry = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc,
				 (uint8_t *)(eh->ether_shost),
				 vdev->pdev->pdev_id);
	if (ast_entry)
		sa_peer_id = ast_entry->peer_id;
	qdf_spin_unlock_bh(&soc->ast_lock);

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer->bss_peer && peer->nawds_enabled) {
			peer_id = peer->peer_id;
			/* Multicast packets need to be
			 * dropped in case of intra bss forwarding
			 */
			if (sa_peer_id == peer->peer_id) {
				dp_tx_debug("multicast packet");
				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
				continue;
			}

			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("nbuf clone failed"));
				break;
			}

			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
							    msdu_info, peer_id,
							    NULL);
			if (nbuf_clone) {
				dp_tx_debug("pkt send failed");
				qdf_nbuf_free(nbuf_clone);
			} else {
				if (peer_id != DP_INVALID_PEER)
					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
							 1, qdf_nbuf_len(nbuf));
			}
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP
 * forwarding cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint16_t peer_id = HTT_INVALID_PEER;
	/*
	 * doing a memzero is causing additional function call overhead
	 * so doing static stack clearing
	 */
	struct dp_tx_msdu_info_s msdu_info = {0};
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	/*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in the per-packet path.
	 *
	 * In this path the vdev memory is already protected by the
	 * netdev tx lock.
	 */
	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
			 QDF_MAC_ADDR_REF(nbuf->data));
	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								    &msdu_info);
		if (!nbuf_mesh) {
			dp_verbose_debug("Extracting mesh metadata failed");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/*
	 * Classify the frame and call the corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into the MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
		return NULL;

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("Raw frame %pK", vdev);

		goto send_multiple;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					  qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);

		peer_id = DP_INVALID_PEER;
		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				 1, qdf_nbuf_len(nbuf));
	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare a direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);

	return nbuf;
}
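
/*
 * Dispatch summary for dp_tx_send() above (informational): frames are
 * classified in order TSO -> non-linear SG -> multicast enhancement ->
 * raw -> NAWDS multicast, and anything left over is sent as a single
 * linear MSDU. Per the stated return convention, a caller sketch would
 * look like (hypothetical error handling):
 *
 *	if (dp_tx_send(soc_hdl, vdev_id, nbuf))
 *		qdf_nbuf_free(nbuf);	// non-NULL means send failed and
 *					// the caller still owns the nbuf
 */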
/**
 * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in a special
 * case to avoid a check in the per-packet path.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit packets through dp_tx_send
 * with a special condition to avoid the per-packet check in dp_tx_send
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	/*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in the per-packet path.
	 *
	 * In this path the vdev memory is already protected by the
	 * netdev tx lock.
	 */
	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
			 == QDF_STATUS_E_FAILURE)) {
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		return nbuf;
	}

	return dp_tx_send(soc_hdl, vdev_id, nbuf);
}
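
/*
 * Usage sketch (hypothetical, not mandated by this file): an OSIF layer
 * that cannot guarantee the nbuf really belongs to this vdev would
 * register dp_tx_send_vdev_id_check() as its transmit op so that the
 * vdev id validation runs once here, while the common case keeps using
 * dp_tx_send() without that extra per-packet branch.
 */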
#ifdef UMAC_SUPPORT_PROXY_ARP
/**
 * dp_tx_proxy_arp() - Tx proxy arp handler
 * @vdev: datapath vdev handle
 * @nbuf: sk buffer
 *
 * Return: status
 */
static inline
int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (vdev->osif_proxy_arp)
		return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);

	/*
	 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
	 * osif_proxy_arp to have a valid function pointer assigned
	 * to it
	 */
	dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");

	return QDF_STATUS_NOT_INITIALIZED;
}
#else
/**
 * dp_tx_proxy_arp() - Tx proxy arp handler
 * @vdev: datapath vdev handle
 * @nbuf: sk buffer
 *
 * This function always returns QDF_STATUS_SUCCESS when
 * UMAC_SUPPORT_PROXY_ARP is not defined.
 *
 * Return: status
 */
static inline
int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status)
{
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	qdf_assert(vdev);

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	dp_tx_debug("Tx reinject path");

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_id != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 *   send ucast on that peer only
			 */
			    ((peer->bss_peer && num_peers_3addr && is_mcast) ||
			     (peer->wds_enabled &&
			      ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			       (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			    (peer->bss_peer &&
			     (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
#endif
				peer_id = DP_INVALID_PEER;

				nbuf_copy = qdf_nbuf_copy(nbuf);
				if (!nbuf_copy) {
					dp_tx_debug("nbuf copy failed");
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
								   nbuf_copy,
								   &msdu_info,
								   peer_id,
								   NULL);
				if (nbuf_copy) {
					dp_tx_debug("pkt send failed");
					qdf_nbuf_free(nbuf_copy);
				}
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
	}

	qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_tx_desc_s *tx_desc,
				  uint8_t *status)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path",
		  __func__);

	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "netbuf %pK offset %d",
				  netbuf, tx_desc->pkt_offset);
			return;
		}
	}

	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "netbuf %pK offset %zu", netbuf,
			  sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->band = tx_desc->pdev->operating_channel.band;
	mhdr->channel = tx_desc->pdev->operating_channel.num;
}
#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
}
#endif
#ifdef QCA_PEER_EXT_STATS
/*
 * dp_tx_compute_tid_delay() - Compute per TID delay
 * @stats: Per TID delay stats
 * @tx_desc: Software Tx descriptor
 *
 * Compute the software enqueue and hw enqueue delays and
 * update the respective histograms
 *
 * Return: void
 */
static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
				    struct dp_tx_desc_s *tx_desc)
{
	struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = tx_desc->timestamp;
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);

	/*
	 * Update the Tx software enqueue delay and the HW
	 * enqueue-to-completion delay.
	 */
	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
}
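
/*
 * Worked example (illustrative numbers, not from any capture): if a frame
 * is timestamped at ingress t0 = 1000 ms, enqueued to HW at t1 = 1002 ms
 * and its completion is processed at t2 = 1010 ms, then
 * sw_enqueue_delay = t1 - t0 = 2 ms and fwhw_transmit_delay = t2 - t1 =
 * 8 ms; each value lands in the matching bucket of the tx_swq_delay and
 * hwtx_delay histograms respectively.
 */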
/*
 * dp_tx_update_peer_ext_stats() - Update the peer extended stats
 * @peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @tid: Transmission ID
 * @ring_id: Rx CPU context ID/CPU_ID
 *
 * Update the peer extended stats. These are enhanced per-msdu level
 * delay stats.
 *
 * Return: void
 */
static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
					struct dp_tx_desc_s *tx_desc,
					uint8_t tid, uint8_t ring_id)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	struct cdp_peer_ext_stats *pext_stats = NULL;

	soc = pdev->soc;
	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
		return;

	pext_stats = peer->pext_stats;

	qdf_assert(pext_stats);
	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);

	/*
	 * For non-data TIDs, use the last data TID
	 * (CDP_MAX_DATA_TIDS - 1)
	 */
	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
				tx_desc);
}
#else
static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
					       struct dp_tx_desc_s *tx_desc,
					       uint8_t tid, uint8_t ring_id)
{
}
#endif
/**
 * dp_tx_compute_delay() - Compute per-packet Tx path delays and update
 *                         the delay stats with the correct fields
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
static void dp_tx_compute_delay(struct dp_vdev *vdev,
				struct dp_tx_desc_s *tx_desc,
				uint8_t tid, uint8_t ring_id)
{
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;

	if (qdf_likely(!vdev->pdev->delay_stats_flag))
		return;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = tx_desc->timestamp;
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);
	interframe_delay = (uint32_t)(timestamp_ingress -
				      vdev->prev_tx_enq_tstamp);

	/*
	 * Delay in software enqueue
	 */
	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
			      CDP_DELAY_STATS_SW_ENQ, ring_id);
	/*
	 * Delay between packet enqueued to HW and Tx completion
	 */
	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);

	/*
	 * Update interframe delay stats calculated at the hardstart receive
	 * point. Value of vdev->prev_tx_enq_tstamp will be 0 for the 1st
	 * frame, so the interframe delay will not be calculated correctly
	 * for the 1st frame. On the other hand, this avoids an extra
	 * per-packet check of !vdev->prev_tx_enq_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
	vdev->prev_tx_enq_tstamp = timestamp_ingress;
}
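
/*
 * Summary of the three per-packet delays tracked above (informational):
 *
 *	ingress ------> HW enqueue ------> Tx completion
 *	   |   SW_ENQ       |  FW_HW_TRANSMIT   |
 *
 * TX_INTERFRAME is measured separately, between the ingress timestamps
 * of two consecutive frames on the same vdev.
 */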
#ifdef DISABLE_DP_STATS
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
}
#else
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
	if (subtype != QDF_PROTO_INVALID)
		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
}
#endif
/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *                             per wbm ring
 *
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer, uint8_t ring_id)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	uint8_t mcs, pkt_type;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	soc = pdev->soc;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err("Release source is not from TQM");
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag))
		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON1));

	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON2));

	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON3));

	/*
	 * tx_failed is ideally supposed to be updated from the HTT ppdu
	 * completion stats. But in IPQ807X/IPQ6018 chipsets, owing to a hw
	 * limitation, there are no completions for failed cases. Hence we
	 * update tx_failed from the data path. Please note that if
	 * tx_failed is fixed to be from ppdu, then this has to be removed.
	 */
	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
				peer->stats.tx.dropped.fw_rem_notx +
				peer->stats.tx.dropped.fw_rem_tx +
				peer->stats.tx.dropped.age_out +
				peer->stats.tx.dropped.fw_reason1 +
				peer->stats.tx.dropped.fw_reason2 +
				peer->stats.tx.dropped.fw_reason3;

	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
		tid_stats->tqm_status_cnt[ts->status]++;
	}

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
		dp_update_no_ack_stats(tx_desc->nbuf, peer);
		return;
	}

	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);

	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);

	/*
	 * Following Rate Statistics are updated from HTT PPDU events from FW.
	 * Return from here if HTT PPDU events are enabled.
	 */
	if (!(soc->process_tx_status))
		return;

	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));

	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ts->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
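
/*
 * Note on the DP_STATS_INCC pairs above (informational): for each PHY
 * mode the first conditional increment funnels out-of-range MCS indices
 * into the overflow bucket (MAX_MCS - 1), while the second records
 * in-range indices in their own bucket, so exactly one of the two fires
 * per completion for the matching pkt_type.
 */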
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
			  struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;

	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
			    struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;

	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: tx status
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc,
					   qdf_nbuf_t netbuf,
					   uint8_t status)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;
	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);

	qdf_assert(tx_desc);

	dp_tx_flow_pool_lock(soc, tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev) {
		dp_tx_flow_pool_unlock(soc, tx_desc);
		return;
	}

	osif_dev = vdev->osif_vdev;
	tx_compl_cbk = vdev->tx_comp;
	dp_tx_flow_pool_unlock(soc, tx_desc);

	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		flag |= BIT(QDF_TX_RX_STATUS_OK);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev, flag);
}
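
/*
 * Design note (informational, inferred from the code above): the flow
 * pool lock is held only while the vdev's osif_vdev handle and tx_comp
 * callback are read, guarding against the pool/vdev association being
 * torn down concurrently; the OSIF callback itself is invoked after the
 * lock is dropped so the datapath never calls out of DP with a pool lock
 * held.
 */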
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
	uint64_t delta_ms;
	struct cdp_tx_sojourn_stats *sojourn_stats;

	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
		return;

	if (qdf_unlikely(tid == HTT_INVALID_TID ||
			 tid >= CDP_DATA_TID_MAX))
		return;

	if (qdf_unlikely(!pdev->sojourn_buf))
		return;

	sojourn_stats = (struct cdp_tx_sojourn_stats *)
		qdf_nbuf_data(pdev->sojourn_buf);

	sojourn_stats->cookie = (void *)peer->rdkstats_ctx;

	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
		   txdesc_ts;
	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
			    delta_ms);
	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
	sojourn_stats->num_msdus[tid] = 1;
	sojourn_stats->avg_sojourn_msdu[tid].internal =
		peer->avg_sojourn_msdu[tid].internal;
	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
			     pdev->sojourn_buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	sojourn_stats->sum_sojourn_msdu[tid] = 0;
	sojourn_stats->num_msdus[tid] = 0;
	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
}
#endif
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
 * @soc: dp_soc handle
 * @desc: Tx Descriptor
 * @ts: HAL Tx completion descriptor contents
 *
 * This function is used to send tx completion to packet capture
 */
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc,
				       struct hal_tx_completion_status *ts)
{
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
			     desc, ts->peer_id,
			     WDI_NO_VAL, desc->pdev->pdev_id);
}
#endif
/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
 *
 * Return: none
 */
static inline void
dp_tx_comp_process_desc(struct dp_soc *soc,
			struct dp_tx_desc_s *desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer)
{
	uint64_t time_latency = 0;

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
				desc->timestamp);
	}

	dp_send_completion_to_pkt_capture(soc, desc, ts);

	if (!(desc->msdu_ext_desc)) {
		dp_tx_enh_unmap(soc, desc);

		if (QDF_STATUS_SUCCESS ==
		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
			return;
		}

		if (QDF_STATUS_SUCCESS ==
		    dp_get_completion_indication_for_stack(soc,
							   desc->pdev,
							   peer, ts,
							   desc->nbuf,
							   time_latency)) {
			dp_send_completion_to_stack(soc,
						    desc->pdev,
						    ts->peer_id,
						    ts->ppdu_id,
						    desc->nbuf);
			return;
		}
	}

	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
	dp_tx_comp_free_buf(soc, desc);
}
#ifdef DISABLE_DP_STATS
/**
 * dp_tx_update_connectivity_stats() - update tx connectivity stats
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @status: tx status
 *
 * Return: none
 */
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
}
#else
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
	void *osif_dev;
	ol_txrx_stats_rx_fp stats_cbk;
	uint8_t pkt_type;

	qdf_assert(tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev ||
	    !vdev->stats_cb)
		return;

	osif_dev = vdev->osif_vdev;
	stats_cbk = vdev->stats_cb;

	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
			  &pkt_type);
}
#endif
/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @soc: DP soc handle
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: none
 */
static inline
void dp_tx_comp_process_tx_status(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_peer *peer, uint8_t ring_id)
{
	uint32_t length;
	qdf_ether_header_t *eh;
	struct dp_vdev *vdev = NULL;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	enum qdf_dp_tx_rx_status dp_status;

	if (!nbuf) {
		dp_info_rl("invalid tx descriptor. nbuf NULL");
		goto out;
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	length = qdf_nbuf_len(nbuf);
	dp_status = dp_tx_hw_to_qdf(ts->status);
	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(nbuf),
				 sizeof(qdf_nbuf_data(nbuf)),
				 tx_desc->id, ts->status, dp_status));

	dp_tx_comp_debug("-------------------- \n"
			 "Tx Completion Stats: \n"
			 "-------------------- \n"
			 "ack_frame_rssi = %d \n"
			 "first_msdu = %d \n"
			 "last_msdu = %d \n"
			 "msdu_part_of_amsdu = %d \n"
			 "rate_stats valid = %d \n"
			 "bw = %d \n"
			 "pkt_type = %d \n"
			 "stbc = %d \n"
			 "ldpc = %d \n"
			 "sgi = %d \n"
			 "mcs = %d \n"
			 "ofdma = %d \n"
			 "tones_in_ru = %d \n"
			 "tsf = %d \n"
			 "ppdu_id = %d \n"
			 "transmit_cnt = %d \n"
			 "tid = %d \n"
			 "peer_id = %d\n",
			 ts->ack_frame_rssi, ts->first_msdu,
			 ts->last_msdu, ts->msdu_part_of_amsdu,
			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
			 ts->transmit_cnt, ts->tid, ts->peer_id);

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	if (!peer) {
		dp_info_rl("peer is null or deletion in progress");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}
	vdev = peer->vdev;

	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);

	/* Update per-packet stats for mesh mode */
	if (qdf_unlikely(vdev->mesh_vdev) &&
	    !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

	/* Update peer level stats */
	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);

			if ((peer->vdev->tx_encap_type ==
				htt_cmn_pkt_type_ethernet) &&
				QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
			}
		}
	} else {
		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
			if (qdf_unlikely(peer->in_twt)) {
				DP_STATS_INC_PKT(peer,
						 tx.tx_success_twt,
						 1, length);
			}
		}
	}

	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);

#ifdef QCA_SUPPORT_RDK_STATS
	if (soc->rdkstats_enabled)
		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
					    tx_desc->timestamp,
					    ts->ppdu_id);
#endif

out:
	return;
}
/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function will process a batch of descriptors reaped by
 * dp_tx_comp_handler and release the software descriptors after
 * processing is complete
 *
 * Return: none
 */
static void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts;
	struct dp_peer *peer = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	qdf_nbuf_t netbuf;

	desc = comp_head;

	while (desc) {
		if (peer_id != desc->peer_id) {
			if (peer)
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_COMP);
			peer_id = desc->peer_id;
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_TX_COMP);
		}

		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			struct dp_pdev *pdev = desc->pdev;

			if (qdf_likely(peer)) {
				/*
				 * Increment peer statistics
				 * Minimal statistics update done here
				 */
				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
						 desc->length);

				if (desc->tx_status !=
				    HAL_TX_TQM_RR_FRAME_ACKED)
					DP_STATS_INC(peer, tx.tx_failed, 1);
			}

			qdf_assert(pdev);
			dp_tx_outstanding_dec(pdev);

			/*
			 * Calling a QDF WRAPPER here is creating significant
			 * performance impact so avoided the wrapper call here
			 */
			next = desc->next;
			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
					       desc->id, DP_TX_COMP_UNMAP);
			qdf_mem_unmap_nbytes_single(soc->osdev,
						    desc->dma_addr,
						    QDF_DMA_TO_DEVICE,
						    desc->length);
			qdf_nbuf_free(desc->nbuf);
			dp_tx_desc_free(soc, desc, desc->pool_id);
			desc = next;
			continue;
		}

		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);

		netbuf = desc->nbuf;

		/* check tx complete notification */
		if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
			dp_tx_notify_completion(soc, peer->vdev, desc,
						netbuf, ts.status);

		dp_tx_comp_process_desc(soc, desc, &ts, peer);

		next = desc->next;

		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
}
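
/*
 * Note (informational): the loop above caches the peer reference across
 * consecutive descriptors that complete for the same peer_id, so a burst
 * of completions to one peer costs a single ref get/put instead of one
 * per descriptor.
 */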
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @soc: Handle to DP soc structure
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc, uint8_t *status,
				  uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_peer *peer;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;
	uint8_t vdev_id;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	/*
	 * There can be a scenario where the WBM-consuming descriptor is
	 * enqueued from TQM2WBM first and the TQM completion happens
	 * before the MEC notification comes from FW2WBM. Avoid accessing
	 * any field of the tx descriptor in case of a MEC notify.
	 */
	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
		/*
		 * Get vdev id from HTT status word in case of MEC
		 * notification
		 */
		vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
		if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
			return;

		vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
					     DP_MOD_ID_HTT_COMP);

		if (!vdev)
			return;

		dp_tx_mec_handler(vdev, status);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
		return;
	}

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d", tx_desc->id);
		return;
	}

	pdev = tx_desc->pdev;

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		return;
	}

	qdf_assert(tx_desc->pdev);

	vdev_id = tx_desc->vdev_id;
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);

	if (!vdev)
		return;

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;

		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);

		ts.tsf = htt_desc[3];
		ts.first_msdu = 1;
		ts.last_msdu = 1;
		tid = ts.tid;
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
			tid_stats->htt_status_cnt[tx_status]++;
		}

		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
					     DP_MOD_ID_HTT_COMP);

		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (qdf_likely(peer))
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	default:
		dp_tx_comp_debug("Invalid HTT tx_status %d\n",
				 tx_status);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	bool limit_hit = false;
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	limit_hit =
		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;

	if (limit_hit)
		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}
#endif
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			    uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed = 0;
	uint32_t count;
	uint32_t num_avail_for_reap = 0;
	bool force_break = false;

	DP_HIST_INIT();

more_data:
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;
	count = 0;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		return 0;
	}

	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);

	if (num_avail_for_reap >= quota)
		num_avail_for_reap = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);

	/* Find head descriptor from completion ring */
	while (qdf_likely(num_avail_for_reap--)) {

		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;
		buffer_src = hal_tx_comp_get_buffer_source(soc->hal_soc,
							   tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert */
		if (qdf_unlikely(buffer_src !=
					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (qdf_unlikely(buffer_src !=
					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
			uint8_t wbm_internal_error;

			dp_err_rl(
				"Tx comp release_src != TQM | FW but from %d",
				buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);

			/* When WBM sees NULL buffer_addr_info in any of
			 * ingress rings it sends an error indication,
			 * with wbm_internal_error=1, to a specific ring.
			 * The WBM2SW ring used to indicate these errors is
			 * fixed in HW, and that ring is being used as Tx
			 * completion ring. These errors are not related to
			 * Tx completions, and should just be ignored
			 */
			wbm_internal_error = hal_get_wbm_internal_error(
							soc->hal_soc,
							tx_comp_hal_desc);

			if (wbm_internal_error) {
				dp_err_rl("Tx comp wbm_internal_error!!");
				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
								buffer_src)
					dp_handle_wbm_internal_error(
						soc,
						tx_comp_hal_desc,
						hal_tx_comp_get_buffer_type(
							tx_comp_hal_desc));

			} else {
				dp_err_rl("Tx comp wbm_internal_error false");
				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
			}
			continue;
		}

		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
							       tx_comp_hal_desc,
							       &tx_desc);
		if (!tx_desc) {
			dp_err("unable to retrieve tx_desc!");
			QDF_BUG(0);
			continue;
		}
		tx_desc->buffer_src = buffer_src;
		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
						 htt_tx_status);
			dp_tx_process_htt_completion(soc, tx_desc,
						     htt_tx_status, ring_id);
		} else {
			tx_desc->peer_id =
				hal_tx_comp_get_peer_id(tx_comp_hal_desc);
			tx_desc->tx_status =
				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
			tx_desc->buffer_src = buffer_src;
			/*
			 * If the fast completion mode is enabled, extended
			 * metadata from the descriptor is not copied
			 */
			if (qdf_likely(tx_desc->flags &
						DP_TX_DESC_FLAG_SIMPLE))
				goto add_to_pool;

			/*
			 * If the descriptor is already freed in vdev_detach,
			 * continue to next descriptor
			 */
			if (qdf_unlikely
				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
						   tx_desc->id);
				continue;
			}

			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
				dp_tx_comp_info_rl("pdev in down state %d",
						   tx_desc->id);
				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
				dp_tx_comp_free_buf(soc, tx_desc);
				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
				goto next_desc;
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
						 tx_desc->flags, tx_desc->id);
				qdf_assert_always(0);
			}

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
add_to_pool:
			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;
		}
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * If the processed packet count exceeds the given quota,
		 * stop processing
		 */
		count++;

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
			break;
	}

	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	if (dp_tx_comp_enable_eol_data_check(soc)) {

		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
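
/*
 * Design note (informational sketch): dp_tx_comp_handler() is a two-phase
 * reaper. Phase 1 walks the completion SRNG under the ring access lock
 * and only links the reaped SW descriptors into a head/tail list; phase 2,
 * after dp_srng_access_end(), runs dp_tx_comp_process_desc_list() on that
 * list so per-packet stats updates and stack callbacks execute outside
 * the ring lock. The more_data loop re-arms only when end-of-loop data
 * checking is enabled and the scheduler does not ask the context to yield.
 */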
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif
static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
{
	struct wlan_cfg_dp_soc_ctxt *cfg;
	struct dp_soc *soc;

	soc = vdev->pdev->soc;
	if (!soc)
		return;

	cfg = soc->wlan_cfg_ctx;
	if (!cfg)
		return;

	if (vdev->opmode == wlan_op_mode_ndi)
		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
		 (vdev->subtype == wlan_op_subtype_p2p_go))
		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
	else
		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
}
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				     HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
					vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	dp_tx_vdev_update_feature_flags(vdev);

	return QDF_STATUS_SUCCESS;
}
#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for TDLS link
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	/* Set search type only when peer map v2 messaging is enabled
	 * as we will have the search index (AST hash) only when v2 is
	 * enabled
	 */
	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
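
/*
 * Quick reference (informational) for the flag selection above:
 *
 *	STA + TDLS link up         -> ADDRX_EN | ADDRY_EN (SA and DA search)
 *	STA, no DA search override -> ADDRY_EN            (SA based search)
 *	all other VAP modes        -> ADDRX_EN            (DA based search)
 */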
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, then only check whether the desc's
	 * vdev matches. If vdev is NULL, then check whether the
	 * desc's pdev matches.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated to TX Desc
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: check all allocated TX descriptors on this pdev;
 * no specific vdev is required.
 * Non-NULL: check only the allocated TX descriptors associated
 * with this vdev.
 * @force_free:
 * true: flush the TX descriptors.
 * false: only reset the vdev in each allocated TX descriptor
 * associated with the current vdev.
 *
 * This function walks the TX descriptor pool to flush
 * outstanding TX data or to reset the vdev reference in the
 * associated TX descriptors.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Take the flow pool lock to protect against the pool
		 * being freed when all tx_desc are recycled during TX
		 * completion handling. This is not necessary for a force
		 * flush because:
		 * a. taking the lock would deadlock, since
		 *    dp_tx_desc_release() (called below) also tries to
		 *    acquire it, and
		 * b. dp interrupts are already disabled before the force
		 *    TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free the TX desc if a force free is
				 * required; otherwise only reset the vdev
				 * reference in this TX desc.
				 */
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer to one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
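
/*
 * Illustrative sketch (not part of the original driver): the two
 * canonical dp_tx_desc_flush() call patterns described above. The
 * caller contexts named in the comments follow this file's own
 * documentation.
 */
static inline void dp_tx_desc_flush_example(struct dp_pdev *pdev,
					    struct dp_vdev *vdev)
{
	/* vdev detach: keep the descriptors, only sever the vdev link */
	dp_tx_desc_flush(pdev, vdev, false);

	/* pdev deinit: force-free every descriptor still on this pdev */
	dp_tx_desc_flush(pdev, NULL, true);
}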
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset the vdev reference in all TX descriptors of this vdev */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
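
/*
 * Illustrative sketch (not part of the original driver): with
 * QCA_LL_TX_FLOW_CONTROL_V2 the pools above are only primed here and
 * are created on demand per flow. The helpers dp_tx_create_flow_pool()
 * and dp_tx_delete_flow_pool() are assumed to be the flow-control entry
 * points (see dp_tx_flow_control.c); treat the exact signatures as
 * assumptions.
 */
static inline QDF_STATUS dp_tx_flow_pool_example(struct dp_soc *soc,
						 uint8_t flow_pool_id,
						 uint32_t flow_pool_size)
{
	struct dp_tx_desc_pool_s *pool;

	/* Create (or look up) a pool, moving it out of FLOW_POOL_INACTIVE */
	pool = dp_tx_create_flow_pool(soc, flow_pool_id, flow_pool_size);
	if (!pool)
		return QDF_STATUS_E_NOMEM;

	/* Counterpart teardown once the pool has drained */
	dp_tx_delete_flow_pool(soc, pool, false);

	return QDF_STATUS_SUCCESS;
}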
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}

	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all TX-related descriptors:
 * 1. regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all TX-related descriptors:
 * 1. regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}
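
/*
 * Illustrative sketch (not part of the original driver): the expected
 * pairing and ordering of the soc-level pool routines in this file.
 * dp_soc_tx_desc_sw_pools_alloc()/..._init() are defined later in the
 * file and are assumed to be declared in dp_tx.h.
 */
static inline QDF_STATUS dp_tx_sw_pools_lifecycle_example(struct dp_soc *soc)
{
	if (dp_soc_tx_desc_sw_pools_alloc(soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_RESOURCES;

	if (dp_soc_tx_desc_sw_pools_init(soc) != QDF_STATUS_SUCCESS) {
		dp_soc_tx_desc_sw_pools_free(soc);
		return QDF_STATUS_E_RESOURCES;
	}

	/* ... datapath runs ... */

	dp_soc_tx_desc_sw_pools_deinit(soc);	/* undo init */
	dp_soc_tx_desc_sw_pools_free(soc);	/* undo alloc */

	return QDF_STATUS_SUCCESS;
}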
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for the following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
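
/*
 * Illustrative sketch (not part of the original driver): when
 * wlan_cfg_is_tso_desc_attach_defer() is set, the sw_pools_* routines
 * above skip the TSO pools, and dp_tso_soc_attach() is expected to be
 * invoked later to allocate and initialize them. The cast assumes
 * struct dp_soc embeds cdp_soc_t as its first member, as the reverse
 * cast inside dp_tso_soc_attach() implies.
 */
static inline QDF_STATUS dp_tso_defer_attach_example(struct dp_soc *soc)
{
	/* Nothing to do if TSO pools were set up at soc init time */
	if (!wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	return dp_tso_soc_attach((struct cdp_soc_t *)soc);
}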
/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}