dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
    (DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

#define DP_RETRY_COUNT 7

#ifdef WLAN_MCAST_MLO
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
    HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
    HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
    HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
    HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
    HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
    HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
    HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
    HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
    HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif
/* mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
    HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
    HAL_TX_ENCRYPT_TYPE_WEP_128,
    HAL_TX_ENCRYPT_TYPE_WEP_104,
    HAL_TX_ENCRYPT_TYPE_WEP_40,
    HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
    HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
    HAL_TX_ENCRYPT_TYPE_WAPI,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
    HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4
};

qdf_export_symbol(sec_type_map);
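
/*
 * Illustrative sketch, not part of the driver and guarded out of the build:
 * sec_type_map is indexed by the CDP security type to obtain the matching
 * HAL TX encrypt type. The helper name and the bounds check are assumptions
 * added here for illustration only.
 */
#ifdef DP_TX_DOC_EXAMPLE
static inline uint8_t dp_tx_sec_type_to_hal(enum cdp_sec_type sec_type)
{
    /* fall back to no-cipher for out-of-range input */
    if (sec_type >= MAX_CDP_SEC_TYPE)
        return HAL_TX_ENCRYPT_TYPE_NO_CIPHER;

    return sec_type_map[sec_type];
}
#endif /* DP_TX_DOC_EXAMPLE */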
#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats - Update the increase or decrease in
 * outstanding tx desc count values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
    int32_t tx_descs_cnt =
        qdf_atomic_read(&pdev->num_tx_outstanding);
    if (pdev->tx_descs_max < tx_descs_cnt)
        pdev->tx_descs_max = tx_descs_cnt;
    qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
                               pdev->tx_descs_max);
}
#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
    qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                 QDF_DMA_TO_DEVICE,
                                 desc->nbuf->len);
    desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
    if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
        qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                     QDF_DMA_TO_DEVICE,
                                     desc->nbuf->len);
}
#else
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
    qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                 QDF_DMA_TO_DEVICE, desc->nbuf->len);
}
#endif
#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_outstanding) >=
            soc->num_tx_allowed) {
        dp_tx_info("queued packets are more than max tx, drop the frame");
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return true;
    }

    if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
            pdev->num_tx_allowed) {
        dp_tx_info("queued packets are more than max tx, drop the frame");
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
        return true;
    }
    return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_exception) >=
            soc->num_msdu_exception_desc) {
        dp_info("exc packets are more than max drop the exc pkt");
        DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
        return true;
    }
    return false;
}
/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_inc(&pdev->num_tx_outstanding);
    qdf_atomic_inc(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_dec(&pdev->num_tx_outstanding);
    qdf_atomic_dec(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    qdf_atomic_inc(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    qdf_atomic_dec(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#endif //QCA_TX_LIMIT_CHECK
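
/*
 * Illustrative sketch, not part of the driver and guarded out of the build:
 * the expected pairing of the limit check with the outstanding counters in a
 * transmit path. Descriptor allocation and completion details are elided and
 * the helper name is an assumption for illustration only.
 */
#ifdef DP_TX_DOC_EXAMPLE
static inline bool dp_tx_example_track_outstanding(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;

    /* drop early when soc/pdev outstanding counts hit their limits */
    if (dp_tx_limit_check(vdev))
        return false;

    /* account for the descriptor while HW owns it ... */
    dp_tx_outstanding_inc(pdev);

    /* ... and release the accounting on TX completion */
    dp_tx_outstanding_dec(pdev);

    return true;
}
#endif /* DP_TX_DOC_EXAMPLE */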
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
    enum dp_tx_event_type type;

    if (flags & DP_TX_DESC_FLAG_FLUSH)
        type = DP_TX_DESC_FLUSH;
    else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
        type = DP_TX_COMP_UNMAP_ERR;
    else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
        type = DP_TX_COMP_UNMAP;
    else
        type = DP_TX_DESC_UNMAP;

    return type;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
                       qdf_nbuf_t skb, uint32_t sw_cookie,
                       enum dp_tx_event_type type)
{
    struct dp_tx_desc_event *entry;
    uint32_t idx;

    if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
        return;

    switch (type) {
    case DP_TX_COMP_UNMAP:
    case DP_TX_COMP_UNMAP_ERR:
    case DP_TX_COMP_MSDU_EXT:
        idx = dp_history_get_next_index(&soc->tx_comp_history->index,
                                        DP_TX_COMP_HISTORY_SIZE);
        entry = &soc->tx_comp_history->entry[idx];
        break;
    case DP_TX_DESC_MAP:
    case DP_TX_DESC_UNMAP:
    case DP_TX_DESC_COOKIE:
    case DP_TX_DESC_FLUSH:
        idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
                                        DP_TX_TCL_HISTORY_SIZE);
        entry = &soc->tx_tcl_history->entry[idx];
        break;
    default:
        dp_info_rl("Invalid dp_tx_event_type: %d", type);
        return;
    }

    entry->skb = skb;
    entry->paddr = paddr;
    entry->sw_cookie = sw_cookie;
    entry->type = type;
    entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
                          struct qdf_tso_seg_elem_t *tso_seg,
                          qdf_nbuf_t skb, uint32_t sw_cookie,
                          enum dp_tx_event_type type)
{
    int i;

    for (i = 1; i < tso_seg->seg.num_frags; i++) {
        dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
                               skb, sw_cookie, type);
    }

    if (!tso_seg->next)
        dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
                               skb, 0xFFFFFFFF, type);
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
                      qdf_nbuf_t skb, uint32_t sw_cookie,
                      enum dp_tx_event_type type)
{
    struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
    uint32_t num_segs = tso_info.num_segs;

    while (num_segs) {
        dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
        curr_seg = curr_seg->next;
        num_segs--;
    }
}
#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
    return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
                       qdf_nbuf_t skb, uint32_t sw_cookie,
                       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
                          struct qdf_tso_seg_elem_t *tso_seg,
                          qdf_nbuf_t skb, uint32_t sw_cookie,
                          enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
                      qdf_nbuf_t skb, uint32_t sw_cookie,
                      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);

/**
 * dp_is_tput_high() - Check if throughput is high
 *
 * @soc - core txrx main context
 *
 * The current function is based on the RTPM tput policy variable, where RTPM
 * is avoided based on throughput.
 *
 * Return: non-zero if throughput is high
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
    return dp_get_rtpm_tput_policy_requirement(soc);
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    TSO_DEBUG("%s: Unmap the tso segment", __func__);
    if (qdf_unlikely(!seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else {
        bool is_last_seg;

        /* no tso segment left to do dma unmap */
        if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
            return;

        is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
                      true : false;
        qdf_nbuf_unmap_tso_segment(soc->osdev,
                                   seg_desc, is_last_seg);
        num_seg_desc->num_seg.tso_cmn_num_seg--;
    }
}
/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
        dp_tx_err("TSO desc is NULL!");
        qdf_assert(0);
    } else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
        dp_tx_err("TSO num desc is NULL!");
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *)tx_desc->
            msdu_ext_desc->tso_num_desc;

        /* Add the tso num segment into the free list */
        if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                                tx_desc->msdu_ext_desc->
                                tso_num_desc);
            tx_desc->msdu_ext_desc->tso_num_desc = NULL;
            DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
        }

        /* Add the tso segment into the free list */
        dp_tx_tso_desc_free(soc,
                            tx_desc->pool_id, tx_desc->msdu_ext_desc->
                            tso_desc);
        tx_desc->msdu_ext_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: none
 */
void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    dp_tx_outstanding_dec(pdev);

    if (tx_desc->msdu_ext_desc) {
        if (tx_desc->frm_type == dp_tx_frm_tso)
            dp_tx_tso_desc_release(soc, tx_desc);

        if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
            dp_tx_me_free_buf(tx_desc->pdev,
                              tx_desc->msdu_ext_desc->me_buffer);

        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
    }

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&soc->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
        tx_desc->buffer_src)
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
                                                     soc->hal_soc);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
                tx_desc->id, comp_status,
                qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
    return;
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                          struct dp_tx_msdu_info_s *msdu_info)
{
    uint32_t *meta_data = msdu_info->meta_data;
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *)meta_data;

    uint8_t htt_desc_size;

    /* Size rounded to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

    if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
        HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
                                                       meta_data[0])) {
        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
                         htt_desc_size_aligned)) {
            nbuf = qdf_nbuf_realloc_headroom(nbuf,
                                             htt_desc_size_aligned);
            if (!nbuf) {
                /*
                 * qdf_nbuf_realloc_headroom won't do skb_clone
                 * as skb_realloc_headroom does, so no free is
                 * needed here.
                 */
                DP_STATS_INC(vdev,
                             tx_i.dropped.headroom_insufficient,
                             1);
                qdf_print(" %s[%d] skb_realloc_headroom failed",
                          __func__, __LINE__);
                return 0;
            }
        }
        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        if (!hdr) {
            dp_tx_err("Error in filling HTT metadata");
            return 0;
        }
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *)&tso_seg->tso_flags;
    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
                                    tso_seg->tso_flags.ip_len);
    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
                          (tso_seg->tso_frags[num_frag].length));

        qdf_dmaaddr_to_32s(
            tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                                   tso_seg->tso_frags[num_frag].length);
    }

    return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    return;
}
#endif
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_num_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_num_seg;

    while (free_num_seg) {
        next_num_seg = free_num_seg->next;
        dp_tso_num_seg_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_num_seg);
        free_num_seg = next_num_seg;
    }
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    struct qdf_tso_seg_elem_t *next_seg;

    if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "TSO number seg desc is NULL!");
        return;
    }

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
        free_seg = next_seg;
    }
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev - pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    uint32_t stats_idx;

    stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
                 % CDP_MAX_TSO_PACKETS);

    return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    return 0;
}
#endif /* FEATURE_TSO_STATS */
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
                                          struct dp_tx_msdu_info_s *msdu_info,
                                          bool tso_seg_unmap)
{
    struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
    struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
    struct qdf_tso_num_seg_elem_t *tso_num_desc =
        tso_info->tso_num_seg_list;

    /* do dma unmap for each segment */
    if (tso_seg_unmap)
        dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

    /* free all tso num segment descriptors (the list typically
     * holds only one)
     */
    dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

    /* free all tso segment descriptors */
    dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
                                    qdf_nbuf_t msdu,
                                    struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct dp_pdev *pdev = vdev->pdev;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
                soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            dp_err_rl("Failed to alloc tso seg desc");
            DP_STATS_INC_PKT(vdev->pdev,
                             tso_stats.tso_no_mem_dropped, 1,
                             qdf_nbuf_len(msdu));
            dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
                                       msdu_info->tx_queue.desc_pool_id);

    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
                 __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
              msdu_info->num_seg);

    if (!(msdu_info->num_seg)) {
        /*
         * Free allocated TSO seg desc and number seg desc,
         * do unmap for segments if dma map has done.
         */
        DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, true);

        return QDF_STATUS_E_INVAL;
    }

    dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
                          msdu, 0, DP_TX_DESC_MAP);

    tso_info->curr_seg = tso_info->tso_seg_list;

    tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
    dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
                         msdu, msdu_info->num_seg);
    dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
                                tso_info->msdu_stats_idx);
    dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
                                    qdf_nbuf_t msdu,
                                    struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif
QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
                        (DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
                         sizeof(struct htt_tx_msdu_desc_ext2_t)));
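
/*
 * Illustrative sketch, not part of the driver and guarded out of the build:
 * the compile-time assert above guarantees that the DWORD-sized meta_data[]
 * scratch area in struct dp_tx_msdu_info_s can hold a complete
 * htt_tx_msdu_desc_ext2_t, so a copy like the one below (cf.
 * dp_tx_prepare_ext_desc()) cannot overflow. The helper name is an
 * assumption for illustration only.
 */
#ifdef DP_TX_DOC_EXAMPLE
static inline void
dp_tx_example_fill_meta_data(struct dp_tx_msdu_info_s *msdu_info,
                             struct htt_tx_msdu_desc_ext2_t *desc_ext)
{
    /* safe by the compile-time size check above */
    qdf_mem_copy(&msdu_info->meta_data[0], desc_ext,
                 sizeof(struct htt_tx_msdu_desc_ext2_t));
}
#endif /* DP_TX_DOC_EXAMPLE */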
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    if (msdu_info->exception_fw &&
        qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                     &msdu_info->meta_data[0],
                     sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&soc->num_tx_exception);
        msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;
        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                                       seg_info->frags[i].paddr_lo,
                                       seg_info->frags[i].paddr_hi,
                                       seg_info->frags[i].len);
        }
        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                                   &cached_ext_desc[0]);
        break;

    default:
        break;
    }

    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                       cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
                         msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @soc: DP soc handle
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
                            qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
                            qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
    if (dp_is_tput_high(soc))
        return;

    QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
    QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
    DPTRACE(qdf_dp_trace_ptr(skb,
                             QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             msdu_id, vdev_id, 0));

    qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

    DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                                  QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
                                  msdu_id, QDF_TX));
}
#endif
#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Returns: 1 if the packet is marked as exception,
 *          0, if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return 0;
}
#endif
  933. /**
  934. * dp_tx_desc_prepare_single - Allocate and prepare Tx descriptor
  935. * @vdev: DP vdev handle
  936. * @nbuf: skb
  937. * @desc_pool_id: Descriptor pool ID
  938. * @meta_data: Metadata to the fw
  939. * @tx_exc_metadata: Handle that holds exception path metadata
  940. * Allocate and prepare Tx descriptor with msdu information.
  941. *
  942. * Return: Pointer to Tx Descriptor on success,
  943. * NULL on failure
  944. */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
					       qdf_nbuf_t nbuf,
					       uint8_t desc_pool_id,
					       struct dp_tx_msdu_info_s *msdu_info,
					       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;
	tx_desc->length = qdf_nbuf_headlen(nbuf);

	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	/* Packets marked by upper layer (OS-IF) to be sent to FW */
	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
		is_exception = 1;

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which
	 * include transmit rate, power, priority, channel, channel bandwidth,
	 * nss etc. These are filled in the HTT MSDU descriptor and sent in
	 * the frame pre-header. These frames are sent as exception packets
	 * to firmware.
	 *
	 * HW requirement is that the metadata should always point to an
	 * 8-byte aligned address. So we add alignment pad to the start of
	 * the buffer. HTT metadata should be a multiple of 8 bytes, to get
	 * an 8-byte aligned start address along with the align_pad added.
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             | ^    in HW descriptor (aligned)
	 *  |        HTT Metadata         | |
	 *  |                             | |
	 *  |                             | | Packet Offset given in descriptor
	 *  |                             | |
	 *  |-----------------------------| |
	 *  |       Alignment Pad         | v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |         SKB Data            |          (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
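	/*
	 * Illustrative example (values assumed, not from the spec): if
	 * qdf_nbuf_data() returns an address ending in 0x5, then
	 * align_pad = 0x5 & 0x7 = 5, and pushing 5 bytes of head makes the
	 * buffer start 8-byte aligned. With an HTT metadata header of,
	 * say, 8 bytes, pkt_offset = 5 + 8 = 13, and that offset is
	 * subtracted from the descriptor length below so HW sees only the
	 * payload.
	 */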
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata && tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			dp_tx_err("qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;

		tx_desc->length = qdf_nbuf_headlen(nbuf);
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
		tx_desc->length -= tx_desc->pkt_offset;
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for a
 *			  multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor.
 *
 * Return: Pointer to Tx Descriptor on success,
 *	   NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
					       qdf_nbuf_t nbuf,
					       struct dp_tx_msdu_info_s *msdu_info,
					       uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}
	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		dp_tx_info("Tx Extension Descriptor Alloc Fail");
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&soc->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	tx_desc->dma_addr = msdu_ext_desc->paddr;
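	/*
	 * Note: dma_addr above points at the MSDU extension descriptor, not
	 * at the payload, so the length programmed below is the size of the
	 * extension descriptor itself (including inline HTT metadata when
	 * the METADATA_VALID flag is set).
	 */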
	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
	else
		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *	       descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_seg_info_s *seg_info,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* Continue only if frames are of DATA type */
	if (!DP_FRAME_IS_DATA(qos_wh)) {
		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
		dp_tx_debug("Pkt recd is not of data type");
		goto error;
	}
	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (vdev->raw_mode_war &&
	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
	     curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
		/*
		 * Number of nbufs must not exceed the size of the frags
		 * array in seg_info.
		 */
		if (i >= DP_TX_MAX_NUM_FRAGS) {
			dp_err_rl("nbuf cnt exceeds the max number of segs");
			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
			goto error;
		}
		if (QDF_STATUS_SUCCESS !=
			qdf_nbuf_map_nbytes_single(vdev->osdev,
						   curr_nbuf,
						   QDF_DMA_TO_DEVICE,
						   curr_nbuf->len)) {
			dp_tx_err("%s dma map error ", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			goto error;
		}
		/* Update the count of mapped nbufs */
		mapped_buf_num++;
		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *)curr_nbuf;

		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
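	/*
	 * Unwind: only the first mapped_buf_num buffers were DMA-mapped, so
	 * unmap just those; every nbuf in the chain is freed regardless.
	 */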
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
						     QDF_DMA_TO_DEVICE,
						     curr_nbuf->len);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;
}
/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
				    qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur_nbuf = nbuf;

	do {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
					     QDF_DMA_TO_DEVICE,
					     cur_nbuf->len);
		cur_nbuf = qdf_nbuf_next(cur_nbuf);
	} while (cur_nbuf);
}
#ifdef VDEV_PEER_PROTOCOL_COUNT
void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
					       qdf_nbuf_t nbuf)
{
	qdf_nbuf_t nbuf_local;
	struct dp_vdev *vdev_local = vdev_hdl;

	do {
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
			break;
		nbuf_local = nbuf;
		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
				 htt_cmn_pkt_type_raw))
			break;
		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
			break;
		else if (qdf_nbuf_is_tso((nbuf_local)))
			break;
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
						       (nbuf_local),
						       NULL, 1, 0);
	} while (0);
}
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @nbuf: packet being transmitted
 *
 * Returns: none
 */
void dp_tx_update_stats(struct dp_soc *soc,
			qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
}

int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid)
{
	struct dp_swlm *swlm = &soc->swlm;
	union swlm_data swlm_query_data;
	struct dp_swlm_tcl_data tcl_data;
	QDF_STATUS status;
	int ret;

	if (qdf_unlikely(!swlm->is_enabled))
		return 0;

	tcl_data.nbuf = tx_desc->nbuf;
	tcl_data.tid = tid;
	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
	swlm_query_data.tcl_data = &tcl_data;

	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_swlm_tcl_reset_session_data(soc);
		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
		return 0;
	}

	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
	if (ret) {
		DP_STATS_INC(swlm, tcl.coalesce_success, 1);
	} else {
		DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
	}

	return ret;
}
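/*
 * Note on the coalesce contract: a non-zero return from
 * dp_tx_attempt_coalescing() means the TCL head-pointer update may be
 * deferred; dp_tx_ring_access_end() below then only reaps the ring instead
 * of flushing the write to HW.
 */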
void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	if (coalesce)
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
	else
		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}
#endif
#ifdef FEATURE_RUNTIME_PM
static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
	return qdf_atomic_read(&soc->rtpm_high_tput_flag);
}

/**
 * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Wrapper for HAL ring access end for data transmission for
 * FEATURE_RUNTIME_PM
 *
 * Returns: none
 */
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	int ret;

	/*
	 * Avoid runtime get and put APIs under high throughput scenarios.
	 */
	if (dp_get_rtpm_tput_policy_requirement(soc)) {
		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		return;
	}

	ret = hif_pm_runtime_get(soc->hif_handle,
				 RTPM_ID_DW_TX_HW_ENQUEUE, true);
	switch (ret) {
	case 0:
		if (hif_system_pm_state_check(soc->hif_handle)) {
			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		}
		hif_pm_runtime_put(soc->hif_handle,
				   RTPM_ID_DW_TX_HW_ENQUEUE);
		break;
	/*
	 * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS,
	 * take the dp runtime refcount using dp_runtime_get, then
	 * check the link state: if up, write the TX ring HP, else just set
	 * the flush event.
	 * In dp_runtime_resume, wait until the dp runtime refcount becomes
	 * zero or times out, then flush pending tx.
	 */
	case -EBUSY:
	case -EINPROGRESS:
		dp_runtime_get(soc);
		if (hif_pm_get_link_state(soc->hif_handle) ==
		    HIF_PM_LINK_STATE_UP) {
			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		} else {
			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			qdf_atomic_inc(&soc->tx_pending_rtpm);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		}
		dp_runtime_put(soc);
		break;
	default:
		dp_runtime_get(soc);
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		qdf_atomic_inc(&soc->tx_pending_rtpm);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
		dp_runtime_put(soc);
	}
}
#else

#ifdef DP_POWER_SAVE
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	if (hif_system_pm_state_check(soc->hif_handle)) {
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	} else {
		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
	}
}
#endif

static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
	return 0;
}
#endif
/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the TID is updated
 *
 * Extract the DSCP or PCP information from the frame and map it into the
 * TID value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = (uint8_t *)(eh->ether_dhost);
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *)nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2 * QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data +
					2 * QDF_MAC_ADDR_SIZE +
					sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *)eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *)L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/*
		 * TODO: use flowlabel
		 * igmp/mld cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;
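		/*
		 * The first 32 bits of an IPv6 header carry
		 * version(4) | traffic class(8) | flow label(20); assuming
		 * IPV6_FLOWINFO_PRIORITY masks the traffic-class bits, pri
		 * below ends up holding the 8-bit traffic class, which is
		 * then treated like an IPv4 TOS byte.
		 */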
		ver_pri_flowlabel = *(unsigned long *)L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}
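	/*
	 * Worked example (assuming DP_IP_DSCP_SHIFT == 2 and
	 * DP_IP_DSCP_MASK == 0x3F): a TOS byte of 0xB8 (DSCP 46, EF) yields
	 * tos = 46, and the TID is read from dscp_tid_map[map_id][46].
	 */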
	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;

	return;
}
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the TID is updated
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	/*
	 * The skip_sw_tid_classification flag will be set in the below cases:
	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
	 * 2. hlos_tid_override enabled for vdev
	 * 3. mesh mode enabled for vdev
	 */
	if (qdf_likely(vdev->skip_sw_tid_classification)) {
		/* Update tid in msdu_info from skb priority */
		if (qdf_unlikely(vdev->skip_sw_tid_classification
				 & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
			uint32_t tid = qdf_nbuf_get_priority(nbuf);

			if (tid == DP_TX_INVALID_QOS_TAG)
				return;

			msdu_info->tid = tid;
			return;
		}
		return;
	}

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_soc *soc,
				    struct dp_vdev *vdev,
				    struct dp_tx_desc_s *tx_desc)
{
	if (vdev) {
		if (vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			vdev->is_tdls_frame = false;
		}
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
					 struct dp_tx_desc_s *tx_desc)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
						     DP_MOD_ID_TDLS);

	if (qdf_unlikely(!vdev)) {
		dp_err_rl("vdev is null!");
		goto error;
	}

	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
		return;
	} else {
		dp_err_rl("callback func is null");
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
error:
	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_tx_msdu_single_map() - do nbuf map
 * @vdev: DP vdev handle
 * @tx_desc: DP TX descriptor pointer
 * @nbuf: skb pointer
 *
 * For a TDLS frame, use qdf_nbuf_map_single() to align with the unmap
 * operation done in other components.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
		return qdf_nbuf_map_nbytes_single(vdev->osdev,
						  nbuf,
						  QDF_DMA_TO_DEVICE,
						  nbuf->len);
	else
		return qdf_nbuf_map_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE);
}
#else
static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
						struct dp_tx_desc_s *tx_desc)
{
}

static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	return qdf_nbuf_map_nbytes_single(vdev->osdev,
					  nbuf,
					  QDF_DMA_TO_DEVICE,
					  nbuf->len);
}
#endif
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->mesh_vdev))
		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
}

/**
 * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = NULL;

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
	} else {
		vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
					     DP_MOD_ID_MESH);
		if (vdev && vdev->osif_tx_free_ext)
			vdev->osif_tx_free_ext((nbuf));
		else
			qdf_nbuf_free(nbuf);

		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
	}
}
#else
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_frame_is_drop() - checks if the packet is loopback
 * @vdev: DP vdev handle
 * @srcmac: source MAC address
 * @dstmac: destination MAC address
 *
 * Return: 1 if the frame needs to be dropped, else 0
 */
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
{
	struct dp_pdev *pdev = NULL;
	struct dp_ast_entry *src_ast_entry = NULL;
	struct dp_ast_entry *dst_ast_entry = NULL;
	struct dp_soc *soc = NULL;

	qdf_assert(vdev);
	pdev = vdev->pdev;
	qdf_assert(pdev);
	soc = pdev->soc;

	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, dstmac, vdev->pdev->pdev_id);
	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, srcmac, vdev->pdev->pdev_id);
	if (dst_ast_entry && src_ast_entry) {
		if (dst_ast_entry->peer_id ==
			src_ast_entry->peer_id)
			return 1;
	}

	return 0;
}
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
/* MLO peer id for reinject */
#define DP_MLO_MCAST_REINJECT_PEER_ID 0xFFFD
/* MLO vdev id inc offset */
#define DP_MLO_VDEV_ID_OFFSET 0x80
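/*
 * For MLO multicast reinject, the TCL metadata below is switched to the
 * global-sequence-based type and the vdev_id is offset by
 * DP_MLO_VDEV_ID_OFFSET, presumably so the target can tell reinjected MLO
 * multicast apart from normal per-link traffic.
 */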
static inline void
dp_tx_update_mcast_param(uint16_t peer_id,
			 uint16_t *htt_tcl_metadata,
			 struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info)
{
	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
		*htt_tcl_metadata = 0;
		DP_TX_TCL_METADATA_TYPE_SET(
				*htt_tcl_metadata,
				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
						    msdu_info->gsn);
		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
	} else {
		msdu_info->vdev_id = vdev->vdev_id;
	}
}
#else
static inline void
dp_tx_update_mcast_param(uint16_t peer_id,
			 uint16_t *htt_tcl_metadata,
			 struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info)
{
}
#endif
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (carries the TID from HLOS for overriding the
 *	       default DSCP-TID mapping, the metadata to the fw, and the Tx
 *	       queue to be used for this Tx frame)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	dp_tx_update_tdls_flags(soc, vdev, tx_desc);
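	/*
	 * TCL metadata selection, three cases: an invalid peer id gets the
	 * vdev metadata marked host-inspected; a valid explicit peer id
	 * (e.g. NAWDS) gets peer-based metadata; otherwise the plain vdev
	 * metadata is used.
	 */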
	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					    DP_TCL_METADATA_TYPE_PEER_BASED);
		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
					       peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw)
		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);

	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
					 !pdev->enhanced_stats_en);

	dp_tx_update_mesh_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
			       tx_desc->id, DP_TX_DESC_MAP);
	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
					     htt_tcl_metadata,
					     tx_exc_metadata, msdu_info);

	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
			     tx_desc, tx_q->ring_id);
		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
				       tx_desc->id, DP_TX_DESC_UNMAP);
		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
					     QDF_DMA_TO_DEVICE,
					     nbuf->len);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}
/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t nbuf = desc->nbuf;
	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(soc, desc);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
					       desc->id, DP_TX_COMP_MSDU_EXT);
			dp_tx_tso_seg_history_add(soc,
						  desc->msdu_ext_desc->tso_desc,
						  desc->nbuf, desc->id, type);
			/* unmap each TSO seg before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc,
						desc->msdu_ext_desc->tso_desc,
						desc->msdu_ext_desc->
						tso_num_desc);
			qdf_nbuf_free(nbuf);
			return;
		}
	}
	/* If it's an ME frame, don't unmap the cloned nbufs */
	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
		goto nbuf_free;

	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
	dp_tx_unmap(soc, desc);

	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
		return dp_mesh_tx_comp_free_buff(soc, desc);
nbuf_free:
	qdf_nbuf_free(nbuf);
}
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline
#else
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				prep_desc_fail++;
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
				if (prep_desc_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if descriptor
					 * preparation failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For ME, there are as many segments as there
				 * are clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				}
				i++;
				continue;
			}

			if (msdu_info->frm_type == dp_tx_frm_tso) {
				dp_tx_tso_seg_history_add(
						soc,
						msdu_info->u.tso_info.curr_seg,
						nbuf, 0, DP_TX_DESC_UNMAP);
				dp_tx_tso_unmap_segment(soc,
							msdu_info->u.tso_info.
							curr_seg,
							msdu_info->u.tso_info.
							tso_num_seg_list);

				if (msdu_info->u.tso_info.curr_seg->next) {
					msdu_info->u.tso_info.curr_seg =
						msdu_info->u.tso_info.curr_seg->next;
					i++;
					continue;
				}
			}

			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->msdu_ext_desc->me_buffer =
				(struct dp_tx_me_buf_t *)msdu_info->
				u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
				 */
			}
		}

		dp_tx_update_mcast_param(DP_INVALID_PEER,
					 &htt_tcl_metadata,
					 vdev,
					 msdu_info);
		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
						     htt_tcl_metadata,
						     NULL, msdu_info);

		if (status != QDF_STATUS_SUCCESS) {
			dp_info("Tx_hw_enqueue Fail tx_desc %pK queue %d",
				tx_desc, tx_q->ring_id);

			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;

			if (msdu_info->frm_type == dp_tx_frm_me) {
				hw_enq_fail++;
				if (hw_enq_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if enqueue
					 * failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For ME, there are as many segments as there
				 * are clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				} else
					break;
				i++;
				dp_tx_desc_release(tx_desc,
						   tx_q->desc_pool_id);
				continue;
			}

			/*
			 * For TSO frames, the nbuf users increment done for
			 * the current segment has to be reverted, since the
			 * hw enqueue for this segment failed
			 */
			if (msdu_info->frm_type == dp_tx_frm_tso &&
			    msdu_info->u.tso_info.curr_seg) {
				/*
				 * unmap and free current,
				 * retransmit remaining segments
				 */
				dp_tx_comp_free_buf(soc, tx_desc);
				i++;
				dp_tx_desc_release(tx_desc,
						   tx_q->desc_pool_id);
				continue;
			}

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			} else
				break;
		}
		i++;
	}

	nbuf = NULL;

done:
	return nbuf;
}
/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *		       for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: nbuf on success,
 *	   NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				   struct dp_tx_seg_info_s *seg_info,
				   struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags, i;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS !=
		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE,
					   qdf_nbuf_headlen(nbuf))) {
		dp_tx_err("dma map error");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t)paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *)nbuf;
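	/*
	 * frags[0] describes the linear (head) portion of the skb; the page
	 * fragments mapped in the loop below are recorded starting at
	 * index 1.
	 */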
	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
							      nbuf, 0,
							      QDF_DMA_TO_DEVICE,
							      cur_frag)) {
			dp_tx_err("frag dma map error");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			goto map_err;
		}

		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t)paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
map_err:
	/* restore paddr into nbuf before calling unmap */
	qdf_nbuf_mapped_paddr_set(nbuf,
				  (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
				  ((uint64_t)
				  seg_info->frags[0].paddr_hi) << 32));
	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
				     QDF_DMA_TO_DEVICE,
				     seg_info->frags[0].len);

	for (i = 1; i <= cur_frag; i++) {
		qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
				   (seg_info->frags[i].paddr_lo |
				   ((uint64_t)
				   seg_info->frags[i].paddr_hi) << 32),
				   seg_info->frags[i].len,
				   QDF_DMA_TO_DEVICE);
	}

	qdf_nbuf_free(nbuf);
	return NULL;
}
/**
 * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Return: None
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
				(msdu_info->meta_data[6], ppdu_cookie);

	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
 *				   and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *	   nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		dp_tx_err("qdf_nbuf_pull_head failed");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
		   " tid %d to_fw %d",
		   msdu_info->meta_data[0],
		   msdu_info->meta_data[1],
		   msdu_info->meta_data[2],
		   msdu_info->meta_data[3],
		   msdu_info->meta_data[4],
		   msdu_info->meta_data[5],
		   msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
#endif
/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
			    HTT_INVALID_TID);
	bool invalid_encap_type =
			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
			       tx_exc->ppdu_cookie == 0);

	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
	    invalid_cookie) {
		return false;
	}

	return true;
}
#ifdef ATH_SUPPORT_IQUE
/**
 * dp_tx_mcast_enhance() - Multicast enhancement on TX
 * @vdev: vdev handle
 * @nbuf: skb
 *
 * Return: true on success,
 *	   false on failure
 */
static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh;

	/* Mcast to Ucast Conversion */
	if (qdf_likely(!vdev->mcast_enhancement_en))
		return true;

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
	    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
		dp_verbose_debug("Mcast frm for ME %pK", vdev);
		qdf_nbuf_set_next(nbuf, NULL);

		DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
				 qdf_nbuf_len(nbuf));
		if (dp_tx_prepare_send_me(vdev, nbuf) ==
				QDF_STATUS_SUCCESS) {
			return false;
		}

		if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
			if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return false;
			}
		}
	}

	return true;
}
#else
static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return true;
}
#endif
/**
 * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
 * @nbuf: qdf_nbuf_t
 * @vdev: struct dp_vdev *
 *
 * Allow the packet for processing only if it is destined to a peer client
 * connected to the same vap. Drop the packet if the client is connected to
 * a different vap.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
{
	struct dp_ast_entry *dst_ast_entry = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
	    DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
		return QDF_STATUS_SUCCESS;
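	/*
	 * Unicast: look the destination up in this vdev's AST table; a miss
	 * means the client is associated to a different vap, so the caller
	 * drops the frame.
	 */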
	qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
	dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
							eh->ether_dhost,
							vdev->vdev_id);

	/* If there is no ast entry, return failure */
	if (qdf_unlikely(!dst_ast_entry)) {
		qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
		return QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	qdf_ether_header_t *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_EXCEPTION);

	if (qdf_unlikely(!vdev))
		goto fail;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	if (!tx_exc_metadata)
		goto fail;

	msdu_info.tid = tx_exc_metadata->tid;
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
			 QDF_MAC_ADDR_REF(nbuf->data));

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		dp_tx_err("Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		dp_tx_err("Mesh mode is not supported in exception path");
		goto fail;
	}

	/*
	 * Classify the frame and call the corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into the MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			goto fail;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			goto fail;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
				 qdf_nbuf_len(nbuf));

		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
					       tx_exc_metadata->ppdu_cookie);
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * Check exception descriptors
	 */
	if (dp_tx_exception_limit_check(vdev))
		goto fail;

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
				      tx_exc_metadata->peer_id,
				      tx_exc_metadata);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
/**
 * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
 *					  in exception path in special case
 *					  to avoid regular exception path chk.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
				   uint8_t vdev_id, qdf_nbuf_t nbuf,
				   struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TX_EXCEPTION);

	if (qdf_unlikely(!vdev))
		goto fail;

	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
			 == QDF_STATUS_E_FAILURE)) {
		DP_STATS_INC(vdev,
			     tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		goto fail;
	}

	/* Drop the vdev reference; it will again be taken inside
	 * dp_tx_send_exception.
	 */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);

	return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);

fail:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
  2536. /**
  2537. * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
  2538. * @soc: DP soc handle
  2539. * @vdev_id: DP vdev handle
  2540. * @nbuf: skb
  2541. *
  2542. * Entry point for Core Tx layer (DP_TX) invoked from
  2543. * hard_start_xmit in OSIF/HDD
  2544. *
  2545. * Return: NULL on success,
  2546. * nbuf when it fails to send
  2547. */
  2548. #ifdef MESH_MODE_SUPPORT
  2549. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2550. qdf_nbuf_t nbuf)
  2551. {
  2552. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2553. struct meta_hdr_s *mhdr;
  2554. qdf_nbuf_t nbuf_mesh = NULL;
  2555. qdf_nbuf_t nbuf_clone = NULL;
  2556. struct dp_vdev *vdev;
  2557. uint8_t no_enc_frame = 0;
  2558. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  2559. if (!nbuf_mesh) {
  2560. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2561. "qdf_nbuf_unshare failed");
  2562. return nbuf;
  2563. }
  2564. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  2565. if (!vdev) {
  2566. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2567. "vdev is NULL for vdev_id %d", vdev_id);
  2568. return nbuf;
  2569. }
  2570. nbuf = nbuf_mesh;
  2571. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2572. if ((vdev->sec_type != cdp_sec_type_none) &&
  2573. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  2574. no_enc_frame = 1;
  2575. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  2576. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  2577. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  2578. !no_enc_frame) {
  2579. nbuf_clone = qdf_nbuf_clone(nbuf);
  2580. if (!nbuf_clone) {
  2581. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2582. "qdf_nbuf_clone failed");
  2583. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2584. return nbuf;
  2585. }
  2586. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  2587. }
  2588. if (nbuf_clone) {
  2589. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  2590. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2591. } else {
  2592. qdf_nbuf_free(nbuf_clone);
  2593. }
  2594. }
  2595. if (no_enc_frame)
  2596. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  2597. else
  2598. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  2599. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  2600. if ((!nbuf) && no_enc_frame) {
  2601. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2602. }
  2603. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2604. return nbuf;
  2605. }
  2606. #else
  2607. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
  2608. qdf_nbuf_t nbuf)
  2609. {
  2610. return dp_tx_send(soc, vdev_id, nbuf);
  2611. }
  2612. #endif
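/*
 * Illustrative sketch only, not part of the datapath: shows how a
 * hypothetical OSIF-level caller would honor the return convention of
 * dp_tx_send_mesh() (NULL on success, the original nbuf on failure).
 * The function name is an assumption made for this example.
 */
static inline void osif_mesh_xmit_example(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	nbuf = dp_tx_send_mesh(soc_hdl, vdev_id, nbuf);
	if (nbuf) {
		/* Send failed; ownership stays with the caller */
		qdf_nbuf_free(nbuf);
	}
}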
  2613. /**
  2614. * dp_tx_nawds_handler() - NAWDS handler
  2615. *
  2616. * @soc: DP soc handle
2617. * @vdev: DP vdev handle
  2618. * @msdu_info: msdu_info required to create HTT metadata
  2619. * @nbuf: skb
  2620. *
2621. * This API replicates multicast frames, with the peer id filled in,
2622. * to each NAWDS-enabled peer on the vdev.
  2623. * Return: none
  2624. */
  2625. static inline
  2626. void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
  2627. struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
  2628. {
  2629. struct dp_peer *peer = NULL;
  2630. qdf_nbuf_t nbuf_clone = NULL;
  2631. uint16_t peer_id = DP_INVALID_PEER;
  2632. uint16_t sa_peer_id = DP_INVALID_PEER;
  2633. struct dp_ast_entry *ast_entry = NULL;
  2634. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2635. if (!soc->ast_offload_support) {
  2636. qdf_spin_lock_bh(&soc->ast_lock);
  2637. ast_entry = dp_peer_ast_hash_find_by_pdevid
  2638. (soc,
  2639. (uint8_t *)(eh->ether_shost),
  2640. vdev->pdev->pdev_id);
  2641. if (ast_entry)
  2642. sa_peer_id = ast_entry->peer_id;
  2643. qdf_spin_unlock_bh(&soc->ast_lock);
  2644. } else {
  2645. if ((qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) &&
  2646. qdf_nbuf_get_tx_fctx(nbuf))
  2647. sa_peer_id = *(uint32_t *)qdf_nbuf_get_tx_fctx(nbuf);
  2648. }
  2649. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2650. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2651. if (!peer->bss_peer && peer->nawds_enabled) {
  2652. peer_id = peer->peer_id;
2653. /* Multicast packets need to be
2654. * dropped in case of intra-bss forwarding
  2655. */
  2656. if (sa_peer_id == peer->peer_id) {
  2657. dp_tx_debug("multicast packet");
  2658. DP_STATS_INC(peer, tx.nawds_mcast_drop,
  2659. 1);
  2660. continue;
  2661. }
  2662. nbuf_clone = qdf_nbuf_clone(nbuf);
  2663. if (!nbuf_clone) {
  2664. QDF_TRACE(QDF_MODULE_ID_DP,
  2665. QDF_TRACE_LEVEL_ERROR,
  2666. FL("nbuf clone failed"));
  2667. break;
  2668. }
  2669. nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
  2670. msdu_info, peer_id,
  2671. NULL);
  2672. if (nbuf_clone) {
  2673. dp_tx_debug("pkt send failed");
  2674. qdf_nbuf_free(nbuf_clone);
  2675. } else {
  2676. if (peer_id != DP_INVALID_PEER)
  2677. DP_STATS_INC_PKT(peer, tx.nawds_mcast,
  2678. 1, qdf_nbuf_len(nbuf));
  2679. }
  2680. }
  2681. }
  2682. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2683. }
  2684. #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
  2685. static inline
  2686. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  2687. {
  2688. if (nbuf) {
  2689. qdf_prefetch(&nbuf->len);
  2690. qdf_prefetch(&nbuf->data);
  2691. }
  2692. }
  2693. #else
  2694. static inline
  2695. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  2696. {
  2697. }
  2698. #endif
  2699. /**
  2700. * dp_tx_send() - Transmit a frame on a given VAP
2701. * @soc_hdl: DP soc handle
  2702. * @vdev_id: id of DP vdev handle
  2703. * @nbuf: skb
  2704. *
  2705. * Entry point for Core Tx layer (DP_TX) invoked from
  2706. * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
  2707. * cases
  2708. *
  2709. * Return: NULL on success,
  2710. * nbuf when it fails to send
  2711. */
  2712. qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2713. qdf_nbuf_t nbuf)
  2714. {
  2715. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2716. uint16_t peer_id = HTT_INVALID_PEER;
  2717. /*
2718. * a memzero causes additional function call overhead, so the
2719. * structure is cleared with a static initializer instead
  2720. */
  2721. struct dp_tx_msdu_info_s msdu_info = {0};
  2722. struct dp_vdev *vdev = NULL;
  2723. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  2724. return nbuf;
  2725. /*
2726. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
2727. * it in the per-packet path.
2728. *
2729. * In this path the vdev memory is already protected by the
2730. * netdev tx lock
  2731. */
  2732. vdev = soc->vdev_id_map[vdev_id];
  2733. if (qdf_unlikely(!vdev))
  2734. return nbuf;
  2735. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2736. QDF_MAC_ADDR_REF(nbuf->data));
  2737. /*
  2738. * Set Default Host TID value to invalid TID
  2739. * (TID override disabled)
  2740. */
  2741. msdu_info.tid = HTT_TX_EXT_TID_INVALID;
  2742. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2743. if (qdf_unlikely(vdev->mesh_vdev)) {
  2744. qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
  2745. &msdu_info);
  2746. if (!nbuf_mesh) {
  2747. dp_verbose_debug("Extracting mesh metadata failed");
  2748. return nbuf;
  2749. }
  2750. nbuf = nbuf_mesh;
  2751. }
  2752. /*
  2753. * Get HW Queue to use for this frame.
2754. * TCL supports up to 4 DMA rings, out of which 3 rings are
  2755. * dedicated for data and 1 for command.
  2756. * "queue_id" maps to one hardware ring.
  2757. * With each ring, we also associate a unique Tx descriptor pool
  2758. * to minimize lock contention for these resources.
  2759. */
  2760. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2761. /*
  2762. * TCL H/W supports 2 DSCP-TID mapping tables.
  2763. * Table 1 - Default DSCP-TID mapping table
2764. * Table 2 - One DSCP-TID override table
  2765. *
  2766. * If we need a different DSCP-TID mapping for this vap,
  2767. * call tid_classify to extract DSCP/ToS from frame and
  2768. * map to a TID and store in msdu_info. This is later used
  2769. * to fill in TCL Input descriptor (per-packet TID override).
  2770. */
  2771. dp_tx_classify_tid(vdev, nbuf, &msdu_info);
  2772. /*
  2773. * Classify the frame and call corresponding
  2774. * "prepare" function which extracts the segment (TSO)
2775. * and fragmentation information (for TSO, SG, ME, or Raw)
  2776. * into MSDU_INFO structure which is later used to fill
  2777. * SW and HW descriptors.
  2778. */
  2779. if (qdf_nbuf_is_tso(nbuf)) {
  2780. dp_verbose_debug("TSO frame %pK", vdev);
  2781. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2782. qdf_nbuf_len(nbuf));
  2783. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2784. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2785. qdf_nbuf_len(nbuf));
  2786. return nbuf;
  2787. }
  2788. goto send_multiple;
  2789. }
  2790. /* SG */
  2791. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2792. struct dp_tx_seg_info_s seg_info = {0};
  2793. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2794. if (!nbuf)
  2795. return NULL;
  2796. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2797. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2798. qdf_nbuf_len(nbuf));
  2799. goto send_multiple;
  2800. }
  2801. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  2802. return NULL;
  2803. /* RAW */
  2804. if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
  2805. struct dp_tx_seg_info_s seg_info = {0};
  2806. nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
  2807. if (!nbuf)
  2808. return NULL;
  2809. dp_verbose_debug("Raw frame %pK", vdev);
  2810. goto send_multiple;
  2811. }
  2812. if (qdf_unlikely(vdev->nawds_enabled)) {
  2813. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  2814. qdf_nbuf_data(nbuf);
  2815. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
  2816. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
  2817. peer_id = DP_INVALID_PEER;
  2818. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  2819. 1, qdf_nbuf_len(nbuf));
  2820. }
  2821. /* Single linear frame */
  2822. /*
  2823. * If nbuf is a simple linear frame, use send_single function to
  2824. * prepare direct-buffer type TCL descriptor and enqueue to TCL
2825. * SRNG. There is no need to set up an MSDU extension descriptor.
  2826. */
  2827. dp_tx_prefetch_nbuf_data(nbuf);
  2828. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
  2829. return nbuf;
  2830. send_multiple:
  2831. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2832. if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
  2833. dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
  2834. return nbuf;
  2835. }
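/*
 * Minimal sketch of the default DSCP-to-TID mapping idea referenced in
 * dp_tx_send() above, assuming the conventional "TID = DSCP >> 3" fold
 * (8 DSCP code points per TID). dp_tx_classify_tid() is the real,
 * table-driven implementation; this helper is hypothetical and exists
 * for illustration only.
 */
static inline uint8_t dp_tx_example_dscp_to_tid(uint8_t dscp)
{
	/* 64 DSCP code points fold onto 8 TIDs, 8 code points each */
	return (dscp >> 3) & 0x7;
}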
  2836. /**
2837. * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in a special
2838. * case to avoid the per-packet vdev id check.
2839. * @soc_hdl: DP soc handle
  2840. * @vdev_id: id of DP vdev handle
  2841. * @nbuf: skb
  2842. *
  2843. * Entry point for Core Tx layer (DP_TX) invoked from
  2844. * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
  2845. * with special condition to avoid per pkt check in dp_tx_send
  2846. *
  2847. * Return: NULL on success,
  2848. * nbuf when it fails to send
  2849. */
  2850. qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2851. uint8_t vdev_id, qdf_nbuf_t nbuf)
  2852. {
  2853. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2854. struct dp_vdev *vdev = NULL;
  2855. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  2856. return nbuf;
  2857. /*
2858. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
2859. * it in the per-packet path.
2860. *
2861. * In this path the vdev memory is already protected by the
2862. * netdev tx lock
  2863. */
  2864. vdev = soc->vdev_id_map[vdev_id];
  2865. if (qdf_unlikely(!vdev))
  2866. return nbuf;
  2867. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  2868. == QDF_STATUS_E_FAILURE)) {
  2869. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  2870. return nbuf;
  2871. }
  2872. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  2873. }
  2874. #ifdef UMAC_SUPPORT_PROXY_ARP
  2875. /**
  2876. * dp_tx_proxy_arp() - Tx proxy arp handler
  2877. * @vdev: datapath vdev handle
2878. * @nbuf: sk buffer
  2879. *
  2880. * Return: status
  2881. */
  2882. static inline
  2883. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2884. {
  2885. if (vdev->osif_proxy_arp)
  2886. return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
  2887. /*
  2888. * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
  2889. * osif_proxy_arp has a valid function pointer assigned
  2890. * to it
  2891. */
  2892. dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
  2893. return QDF_STATUS_NOT_INITIALIZED;
  2894. }
  2895. #else
  2896. /**
  2897. * dp_tx_proxy_arp() - Tx proxy arp handler
  2898. * @vdev: datapath vdev handle
2899. * @nbuf: sk buffer
2900. *
2901. * This function always returns QDF_STATUS_SUCCESS when
2902. * UMAC_SUPPORT_PROXY_ARP is not defined.
  2903. *
  2904. * Return: status
  2905. */
  2906. static inline
  2907. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2908. {
  2909. return QDF_STATUS_SUCCESS;
  2910. }
  2911. #endif
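/*
 * Illustrative sketch only: a hypothetical OSIF callback with the shape
 * dp_tx_proxy_arp() expects through vdev->osif_proxy_arp. Returning
 * QDF_STATUS_SUCCESS lets the reinjected frame proceed; anything else
 * drops it. The name and the pass-through policy are assumptions made
 * for this example.
 */
static inline int osif_proxy_arp_example(ol_osif_vdev_handle osif_vdev,
					 qdf_nbuf_t nbuf)
{
	/* A real handler would inspect ARP/ND frames here */
	return QDF_STATUS_SUCCESS;
}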
  2912. /**
  2913. * dp_tx_reinject_handler() - Tx Reinject Handler
  2914. * @soc: datapath soc handle
  2915. * @vdev: datapath vdev handle
  2916. * @tx_desc: software descriptor head pointer
2917. * @status: Tx completion status from HTT descriptor
2918. * @reinject_reason: reinject reason from HTT descriptor
  2919. *
  2920. * This function reinjects frames back to Target.
  2921. * Todo - Host queue needs to be added
  2922. *
  2923. * Return: none
  2924. */
  2925. void dp_tx_reinject_handler(struct dp_soc *soc,
  2926. struct dp_vdev *vdev,
  2927. struct dp_tx_desc_s *tx_desc,
  2928. uint8_t *status,
  2929. uint8_t reinject_reason)
  2930. {
  2931. struct dp_peer *peer = NULL;
  2932. uint32_t peer_id = HTT_INVALID_PEER;
  2933. qdf_nbuf_t nbuf = tx_desc->nbuf;
  2934. qdf_nbuf_t nbuf_copy = NULL;
  2935. struct dp_tx_msdu_info_s msdu_info;
  2936. #ifdef WDS_VENDOR_EXTENSION
  2937. int is_mcast = 0, is_ucast = 0;
  2938. int num_peers_3addr = 0;
  2939. qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
  2940. struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
  2941. #endif
  2942. qdf_assert(vdev);
  2943. dp_tx_debug("Tx reinject path");
  2944. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  2945. qdf_nbuf_len(tx_desc->nbuf));
  2946. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  2947. #ifdef WLAN_MCAST_MLO
  2948. if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
  2949. if (soc->arch_ops.dp_tx_mcast_handler)
  2950. soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
  2951. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  2952. return;
  2953. }
  2954. #endif
  2955. #endif
  2956. #ifdef WDS_VENDOR_EXTENSION
  2957. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  2958. is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
  2959. } else {
  2960. is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
  2961. }
  2962. is_ucast = !is_mcast;
  2963. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2964. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2965. if (peer->bss_peer)
  2966. continue;
  2967. /* Detect wds peers that use 3-addr framing for mcast.
2968. * If there are any, the bss_peer is used to send the
2969. * mcast frame using 3-addr format. All wds enabled
  2970. * peers that use 4-addr framing for mcast frames will
  2971. * be duplicated and sent as 4-addr frames below.
  2972. */
  2973. if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
  2974. num_peers_3addr = 1;
  2975. break;
  2976. }
  2977. }
  2978. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2979. #endif
  2980. if (qdf_unlikely(vdev->mesh_vdev)) {
  2981. DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
  2982. } else {
  2983. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2984. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2985. if ((peer->peer_id != HTT_INVALID_PEER) &&
  2986. #ifdef WDS_VENDOR_EXTENSION
  2987. /*
  2988. * . if 3-addr STA, then send on BSS Peer
  2989. * . if Peer WDS enabled and accept 4-addr mcast,
  2990. * send mcast on that peer only
  2991. * . if Peer WDS enabled and accept 4-addr ucast,
  2992. * send ucast on that peer only
  2993. */
  2994. ((peer->bss_peer && num_peers_3addr && is_mcast) ||
  2995. (peer->wds_enabled &&
  2996. ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
  2997. (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
  2998. #else
  2999. (peer->bss_peer &&
  3000. (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
  3001. #endif
  3002. peer_id = DP_INVALID_PEER;
  3003. nbuf_copy = qdf_nbuf_copy(nbuf);
  3004. if (!nbuf_copy) {
  3005. dp_tx_debug("nbuf copy failed");
  3006. break;
  3007. }
  3008. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  3009. dp_tx_get_queue(vdev, nbuf,
  3010. &msdu_info.tx_queue);
  3011. nbuf_copy = dp_tx_send_msdu_single(vdev,
  3012. nbuf_copy,
  3013. &msdu_info,
  3014. peer_id,
  3015. NULL);
  3016. if (nbuf_copy) {
  3017. dp_tx_debug("pkt send failed");
  3018. qdf_nbuf_free(nbuf_copy);
  3019. }
  3020. }
  3021. }
  3022. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3023. }
  3024. qdf_nbuf_free(nbuf);
  3025. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3026. }
  3027. /**
  3028. * dp_tx_inspect_handler() - Tx Inspect Handler
  3029. * @soc: datapath soc handle
  3030. * @vdev: datapath vdev handle
  3031. * @tx_desc: software descriptor head pointer
3032. * @status: Tx completion status from HTT descriptor
  3033. *
  3034. * Handles Tx frames sent back to Host for inspection
  3035. * (ProxyARP)
  3036. *
  3037. * Return: none
  3038. */
  3039. void dp_tx_inspect_handler(struct dp_soc *soc,
  3040. struct dp_vdev *vdev,
  3041. struct dp_tx_desc_s *tx_desc,
  3042. uint8_t *status)
  3043. {
  3044. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3045. "%s Tx inspect path",
  3046. __func__);
  3047. DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
  3048. qdf_nbuf_len(tx_desc->nbuf));
  3049. DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
  3050. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3051. }
  3052. #ifdef MESH_MODE_SUPPORT
  3053. /**
  3054. * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
  3055. * in mesh meta header
  3056. * @tx_desc: software descriptor head pointer
  3057. * @ts: pointer to tx completion stats
  3058. * Return: none
  3059. */
  3060. static
  3061. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3062. struct hal_tx_completion_status *ts)
  3063. {
  3064. qdf_nbuf_t netbuf = tx_desc->nbuf;
  3065. if (!tx_desc->msdu_ext_desc) {
  3066. if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
  3067. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3068. "netbuf %pK offset %d",
  3069. netbuf, tx_desc->pkt_offset);
  3070. return;
  3071. }
  3072. }
  3073. }
  3074. #else
  3075. static
  3076. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3077. struct hal_tx_completion_status *ts)
  3078. {
  3079. }
  3080. #endif
  3081. #ifdef QCA_PEER_EXT_STATS
  3082. /*
  3083. * dp_tx_compute_tid_delay() - Compute per TID delay
  3084. * @stats: Per TID delay stats
  3085. * @tx_desc: Software Tx descriptor
  3086. *
  3087. * Compute the software enqueue and hw enqueue delays and
  3088. * update the respective histograms
  3089. *
  3090. * Return: void
  3091. */
  3092. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3093. struct dp_tx_desc_s *tx_desc)
  3094. {
  3095. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3096. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3097. uint32_t sw_enqueue_delay, fwhw_transmit_delay;
  3098. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3099. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3100. timestamp_hw_enqueue = tx_desc->timestamp;
  3101. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3102. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3103. timestamp_hw_enqueue);
  3104. /*
3105. * Update the Tx software enqueue delay and the HW enqueue-to-completion delay.
  3106. */
  3107. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3108. dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
  3109. }
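/*
 * Worked example, illustrative only, of the arithmetic above with
 * made-up timestamps in ms: ingress = 1000, hw_enqueue = 1003,
 * completion (now) = 1010.
 *   sw_enqueue_delay    = 1003 - 1000 = 3 ms (host queuing time)
 *   fwhw_transmit_delay = 1010 - 1003 = 7 ms (FW/HW transmit time)
 * Each value is then binned into its respective delay histogram.
 */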
  3110. /*
  3111. * dp_tx_update_peer_ext_stats() - Update the peer extended stats
  3112. * @peer: DP peer context
  3113. * @tx_desc: Tx software descriptor
  3114. * @tid: Transmission ID
  3115. * @ring_id: Rx CPU context ID/CPU_ID
  3116. *
3117. * Update the peer extended stats. These are enhanced
3118. * delay stats tracked at the per-msdu level.
  3119. *
  3120. * Return: void
  3121. */
  3122. static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
  3123. struct dp_tx_desc_s *tx_desc,
  3124. uint8_t tid, uint8_t ring_id)
  3125. {
  3126. struct dp_pdev *pdev = peer->vdev->pdev;
  3127. struct dp_soc *soc = NULL;
  3128. struct cdp_peer_ext_stats *pext_stats = NULL;
  3129. soc = pdev->soc;
  3130. if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
  3131. return;
  3132. pext_stats = peer->pext_stats;
  3133. qdf_assert(pext_stats);
3134. qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3135. /*
3136. * For out-of-range TIDs, use the highest data TID (CDP_MAX_DATA_TIDS - 1)
  3137. */
  3138. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3139. tid = CDP_MAX_DATA_TIDS - 1;
  3140. dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
  3141. tx_desc);
  3142. }
  3143. #else
  3144. static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
  3145. struct dp_tx_desc_s *tx_desc,
  3146. uint8_t tid, uint8_t ring_id)
  3147. {
  3148. }
  3149. #endif
  3150. /**
  3151. * dp_tx_compute_delay() - Compute and fill in all timestamps
  3152. * to pass in correct fields
  3153. *
3154. * @vdev: vdev handle
  3155. * @tx_desc: tx descriptor
  3156. * @tid: tid value
  3157. * @ring_id: TCL or WBM ring number for transmit path
  3158. * Return: none
  3159. */
  3160. void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
  3161. uint8_t tid, uint8_t ring_id)
  3162. {
  3163. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3164. uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
  3165. if (qdf_likely(!vdev->pdev->delay_stats_flag))
  3166. return;
  3167. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3168. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3169. timestamp_hw_enqueue = tx_desc->timestamp;
  3170. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3171. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3172. timestamp_hw_enqueue);
  3173. interframe_delay = (uint32_t)(timestamp_ingress -
  3174. vdev->prev_tx_enq_tstamp);
  3175. /*
  3176. * Delay in software enqueue
  3177. */
  3178. dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
  3179. CDP_DELAY_STATS_SW_ENQ, ring_id);
  3180. /*
  3181. * Delay between packet enqueued to HW and Tx completion
  3182. */
  3183. dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
  3184. CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
  3185. /*
  3186. * Update interframe delay stats calculated at hardstart receive point.
  3187. * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3188. * interframe delay will not be calculated correctly for the 1st frame.
3189. * On the other hand, this helps avoid an extra per packet check
  3190. * of !vdev->prev_tx_enq_tstamp.
  3191. */
  3192. dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
  3193. CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
  3194. vdev->prev_tx_enq_tstamp = timestamp_ingress;
  3195. }
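/*
 * Worked example, illustrative only, of the interframe delay above with
 * made-up ingress timestamps in ms: previous frame at 2000, current
 * frame at 2012 -> interframe_delay = 2012 - 2000 = 12 ms. For the very
 * first frame prev_tx_enq_tstamp is 0, so that one sample is knowingly
 * skewed, as the comment in dp_tx_compute_delay() explains.
 */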
  3196. #ifdef DISABLE_DP_STATS
  3197. static
  3198. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
  3199. {
  3200. }
  3201. #else
  3202. static
  3203. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
  3204. {
  3205. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  3206. DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
  3207. if (subtype != QDF_PROTO_INVALID)
  3208. DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
  3209. }
  3210. #endif
  3211. /**
  3212. * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
  3213. * per wbm ring
  3214. *
  3215. * @tx_desc: software descriptor head pointer
  3216. * @ts: Tx completion status
  3217. * @peer: peer handle
  3218. * @ring_id: ring number
  3219. *
  3220. * Return: None
  3221. */
  3222. static inline void
  3223. dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
  3224. struct hal_tx_completion_status *ts,
  3225. struct dp_peer *peer, uint8_t ring_id)
  3226. {
  3227. struct dp_pdev *pdev = peer->vdev->pdev;
  3228. struct dp_soc *soc = NULL;
  3229. uint8_t mcs, pkt_type;
  3230. uint8_t tid = ts->tid;
  3231. uint32_t length;
  3232. struct cdp_tid_tx_stats *tid_stats;
  3233. if (!pdev)
  3234. return;
  3235. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3236. tid = CDP_MAX_DATA_TIDS - 1;
  3237. tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3238. soc = pdev->soc;
  3239. mcs = ts->mcs;
  3240. pkt_type = ts->pkt_type;
  3241. if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
  3242. dp_err("Release source is not from TQM");
  3243. return;
  3244. }
  3245. length = qdf_nbuf_len(tx_desc->nbuf);
  3246. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
  3247. if (qdf_unlikely(pdev->delay_stats_flag))
  3248. dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
  3249. DP_STATS_INCC(peer, tx.dropped.age_out, 1,
  3250. (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
  3251. DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
  3252. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  3253. DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
  3254. (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
  3255. DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
  3256. (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
  3257. DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
  3258. (ts->status == HAL_TX_TQM_RR_FW_REASON1));
  3259. DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
  3260. (ts->status == HAL_TX_TQM_RR_FW_REASON2));
  3261. DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
  3262. (ts->status == HAL_TX_TQM_RR_FW_REASON3));
  3263. /*
  3264. * tx_failed is ideally supposed to be updated from HTT ppdu completion
3265. * stats. But in IPQ807X/IPQ6018 chipsets, owing to a hw limitation, there
  3266. * are no completions for failed cases. Hence updating tx_failed from
  3267. * data path. Please note that if tx_failed is fixed to be from ppdu,
  3268. * then this has to be removed
  3269. */
  3270. peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
  3271. peer->stats.tx.dropped.fw_rem_notx +
  3272. peer->stats.tx.dropped.fw_rem_tx +
  3273. peer->stats.tx.dropped.age_out +
  3274. peer->stats.tx.dropped.fw_reason1 +
  3275. peer->stats.tx.dropped.fw_reason2 +
  3276. peer->stats.tx.dropped.fw_reason3;
  3277. if (ts->status < CDP_MAX_TX_TQM_STATUS) {
  3278. tid_stats->tqm_status_cnt[ts->status]++;
  3279. }
  3280. if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
  3281. DP_STATS_INCC(peer, tx.failed_retry_count, 1,
  3282. ts->transmit_cnt > DP_RETRY_COUNT);
  3283. dp_update_no_ack_stats(tx_desc->nbuf, peer);
  3284. return;
  3285. }
  3286. DP_STATS_INCC(peer, tx.retry_count, 1, ts->transmit_cnt > 1);
  3287. DP_STATS_INCC(peer, tx.multiple_retry_count, 1, ts->transmit_cnt > 2);
  3288. DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
  3289. DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
  3290. DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
  3291. /*
  3292. * Following Rate Statistics are updated from HTT PPDU events from FW.
  3293. * Return from here if HTT PPDU events are enabled.
  3294. */
  3295. if (!(soc->process_tx_status))
  3296. return;
  3297. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3298. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
  3299. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3300. ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
  3301. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3302. ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
  3303. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3304. ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
  3305. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3306. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
  3307. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3308. ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
  3309. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3310. ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  3311. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3312. ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  3313. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3314. ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
  3315. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3316. ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
  3317. DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
  3318. DP_STATS_INC(peer, tx.bw[ts->bw], 1);
  3319. DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
  3320. DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
  3321. DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
  3322. DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
  3323. DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
  3324. if (ts->first_msdu) {
  3325. DP_STATS_INCC(peer, tx.retries_mpdu, 1, ts->transmit_cnt > 1);
  3326. DP_STATS_INCC(peer, tx.mpdu_success_with_retries,
  3327. qdf_do_div(ts->transmit_cnt, DP_RETRY_COUNT),
  3328. ts->transmit_cnt > DP_RETRY_COUNT);
  3329. }
  3330. peer->stats.tx.last_tx_ts = qdf_system_ticks();
  3331. }
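/*
 * Minimal sketch of the MCS binning pattern used in
 * dp_tx_update_peer_stats() above: per pkt_type, an out-of-range MCS is
 * accounted to the overflow bucket (MAX_MCS - 1), an in-range MCS to its
 * own bucket. The helper itself is hypothetical and for illustration
 * only; max_mcs would be MAX_MCS_11A/11B/11AC etc. per pkt_type.
 */
static inline uint8_t dp_tx_example_mcs_bucket(uint8_t mcs, uint8_t max_mcs)
{
	return (mcs >= max_mcs) ? (MAX_MCS - 1) : mcs;
}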
  3332. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  3333. /**
  3334. * dp_tx_flow_pool_lock() - take flow pool lock
  3335. * @soc: core txrx main context
  3336. * @tx_desc: tx desc
  3337. *
  3338. * Return: None
  3339. */
  3340. static inline
  3341. void dp_tx_flow_pool_lock(struct dp_soc *soc,
  3342. struct dp_tx_desc_s *tx_desc)
  3343. {
  3344. struct dp_tx_desc_pool_s *pool;
  3345. uint8_t desc_pool_id;
  3346. desc_pool_id = tx_desc->pool_id;
  3347. pool = &soc->tx_desc[desc_pool_id];
  3348. qdf_spin_lock_bh(&pool->flow_pool_lock);
  3349. }
  3350. /**
  3351. * dp_tx_flow_pool_unlock() - release flow pool lock
  3352. * @soc: core txrx main context
  3353. * @tx_desc: tx desc
  3354. *
  3355. * Return: None
  3356. */
  3357. static inline
  3358. void dp_tx_flow_pool_unlock(struct dp_soc *soc,
  3359. struct dp_tx_desc_s *tx_desc)
  3360. {
  3361. struct dp_tx_desc_pool_s *pool;
  3362. uint8_t desc_pool_id;
  3363. desc_pool_id = tx_desc->pool_id;
  3364. pool = &soc->tx_desc[desc_pool_id];
  3365. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  3366. }
  3367. #else
  3368. static inline
  3369. void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3370. {
  3371. }
  3372. static inline
  3373. void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3374. {
  3375. }
  3376. #endif
  3377. /**
  3378. * dp_tx_notify_completion() - Notify tx completion for this desc
  3379. * @soc: core txrx main context
  3380. * @vdev: datapath vdev handle
  3381. * @tx_desc: tx desc
  3382. * @netbuf: buffer
  3383. * @status: tx status
  3384. *
  3385. * Return: none
  3386. */
  3387. static inline void dp_tx_notify_completion(struct dp_soc *soc,
  3388. struct dp_vdev *vdev,
  3389. struct dp_tx_desc_s *tx_desc,
  3390. qdf_nbuf_t netbuf,
  3391. uint8_t status)
  3392. {
  3393. void *osif_dev;
  3394. ol_txrx_completion_fp tx_compl_cbk = NULL;
  3395. uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
  3396. qdf_assert(tx_desc);
  3397. dp_tx_flow_pool_lock(soc, tx_desc);
  3398. if (!vdev ||
  3399. !vdev->osif_vdev) {
  3400. dp_tx_flow_pool_unlock(soc, tx_desc);
  3401. return;
  3402. }
  3403. osif_dev = vdev->osif_vdev;
  3404. tx_compl_cbk = vdev->tx_comp;
  3405. dp_tx_flow_pool_unlock(soc, tx_desc);
  3406. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  3407. flag |= BIT(QDF_TX_RX_STATUS_OK);
  3408. if (tx_compl_cbk)
  3409. tx_compl_cbk(netbuf, osif_dev, flag);
  3410. }
3411. /** dp_tx_sojourn_stats_process() - Collect sojourn stats
3412. * @pdev: pdev handle
3413. * @peer: dp peer handle
3414. * @tid: tid value
3415. * @txdesc_ts: timestamp from txdesc
3416. * @ppdu_id: ppdu id
  3417. * Return: none
  3418. */
  3419. #ifdef FEATURE_PERPKT_INFO
  3420. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  3421. struct dp_peer *peer,
  3422. uint8_t tid,
  3423. uint64_t txdesc_ts,
  3424. uint32_t ppdu_id)
  3425. {
  3426. uint64_t delta_ms;
  3427. struct cdp_tx_sojourn_stats *sojourn_stats;
  3428. if (qdf_unlikely(!pdev->enhanced_stats_en))
  3429. return;
  3430. if (qdf_unlikely(tid == HTT_INVALID_TID ||
  3431. tid >= CDP_DATA_TID_MAX))
  3432. return;
  3433. if (qdf_unlikely(!pdev->sojourn_buf))
  3434. return;
  3435. sojourn_stats = (struct cdp_tx_sojourn_stats *)
  3436. qdf_nbuf_data(pdev->sojourn_buf);
  3437. sojourn_stats->cookie = (void *)peer->rdkstats_ctx;
  3438. delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
  3439. txdesc_ts;
  3440. qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
  3441. delta_ms);
  3442. sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
  3443. sojourn_stats->num_msdus[tid] = 1;
  3444. sojourn_stats->avg_sojourn_msdu[tid].internal =
  3445. peer->avg_sojourn_msdu[tid].internal;
  3446. dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
  3447. pdev->sojourn_buf, HTT_INVALID_PEER,
  3448. WDI_NO_VAL, pdev->pdev_id);
  3449. sojourn_stats->sum_sojourn_msdu[tid] = 0;
  3450. sojourn_stats->num_msdus[tid] = 0;
  3451. sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
  3452. }
  3453. #else
  3454. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  3455. struct dp_peer *peer,
  3456. uint8_t tid,
  3457. uint64_t txdesc_ts,
  3458. uint32_t ppdu_id)
  3459. {
  3460. }
  3461. #endif
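/*
 * Illustrative sketch of the exponentially weighted moving average that
 * qdf_ewma_tx_lag_add() maintains for avg_sojourn_msdu, assuming the
 * common fixed-weight form avg' = avg - avg/w + sample/w (alpha = 1/w).
 * The helper and the weight parameter are assumptions made for this
 * example, not the actual qdf internals.
 */
static inline uint64_t dp_tx_example_ewma_add(uint64_t avg, uint64_t sample,
					      unsigned int weight)
{
	return avg - (avg / weight) + (sample / weight);
}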
  3462. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  3463. /**
  3464. * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
  3465. * @soc: dp_soc handle
  3466. * @desc: Tx Descriptor
  3467. * @ts: HAL Tx completion descriptor contents
  3468. *
  3469. * This function is used to send tx completion to packet capture
  3470. */
  3471. void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
  3472. struct dp_tx_desc_s *desc,
  3473. struct hal_tx_completion_status *ts)
  3474. {
  3475. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
  3476. desc, ts->peer_id,
  3477. WDI_NO_VAL, desc->pdev->pdev_id);
  3478. }
  3479. #endif
  3480. /**
  3481. * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
  3482. * @soc: DP Soc handle
3483. * @desc: software Tx descriptor
3484. * @ts: Tx completion status from HAL/HTT descriptor
  3485. *
  3486. * Return: none
  3487. */
  3488. void
  3489. dp_tx_comp_process_desc(struct dp_soc *soc,
  3490. struct dp_tx_desc_s *desc,
  3491. struct hal_tx_completion_status *ts,
  3492. struct dp_peer *peer)
  3493. {
  3494. uint64_t time_latency = 0;
  3495. /*
  3496. * m_copy/tx_capture modes are not supported for
  3497. * scatter gather packets
  3498. */
  3499. if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
  3500. time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
  3501. desc->timestamp);
  3502. }
  3503. dp_send_completion_to_pkt_capture(soc, desc, ts);
  3504. if (dp_tx_pkt_tracepoints_enabled())
  3505. qdf_trace_dp_packet(desc->nbuf, QDF_TX,
  3506. desc->msdu_ext_desc ?
  3507. desc->msdu_ext_desc->tso_desc : NULL,
  3508. desc->timestamp);
  3509. if (!(desc->msdu_ext_desc)) {
  3510. dp_tx_enh_unmap(soc, desc);
  3511. if (QDF_STATUS_SUCCESS ==
  3512. dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer)) {
  3513. return;
  3514. }
  3515. if (QDF_STATUS_SUCCESS ==
  3516. dp_get_completion_indication_for_stack(soc,
  3517. desc->pdev,
  3518. peer, ts,
  3519. desc->nbuf,
  3520. time_latency)) {
  3521. dp_send_completion_to_stack(soc,
  3522. desc->pdev,
  3523. ts->peer_id,
  3524. ts->ppdu_id,
  3525. desc->nbuf);
  3526. return;
  3527. }
  3528. }
  3529. desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
  3530. dp_tx_comp_free_buf(soc, desc);
  3531. }
  3532. #ifdef DISABLE_DP_STATS
  3533. /**
  3534. * dp_tx_update_connectivity_stats() - update tx connectivity stats
  3535. * @soc: core txrx main context
  3536. * @tx_desc: tx desc
  3537. * @status: tx status
  3538. *
  3539. * Return: none
  3540. */
  3541. static inline
  3542. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  3543. struct dp_vdev *vdev,
  3544. struct dp_tx_desc_s *tx_desc,
  3545. uint8_t status)
  3546. {
  3547. }
  3548. #else
  3549. static inline
  3550. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  3551. struct dp_vdev *vdev,
  3552. struct dp_tx_desc_s *tx_desc,
  3553. uint8_t status)
  3554. {
  3555. void *osif_dev;
  3556. ol_txrx_stats_rx_fp stats_cbk;
  3557. uint8_t pkt_type;
  3558. qdf_assert(tx_desc);
  3559. if (!vdev ||
  3560. !vdev->osif_vdev ||
  3561. !vdev->stats_cb)
  3562. return;
  3563. osif_dev = vdev->osif_vdev;
  3564. stats_cbk = vdev->stats_cb;
  3565. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
  3566. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  3567. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
  3568. &pkt_type);
  3569. }
  3570. #endif
  3571. #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
  3572. void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3573. uint32_t delta_tsf)
  3574. {
  3575. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3576. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3577. DP_MOD_ID_CDP);
  3578. if (!vdev) {
  3579. dp_err_rl("vdev %d does not exist", vdev_id);
  3580. return;
  3581. }
  3582. vdev->delta_tsf = delta_tsf;
  3583. dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
  3584. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3585. }
  3586. QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
  3587. uint8_t vdev_id, bool enable)
  3588. {
  3589. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3590. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3591. DP_MOD_ID_CDP);
  3592. if (!vdev) {
  3593. dp_err_rl("vdev %d does not exist", vdev_id);
  3594. return QDF_STATUS_E_FAILURE;
  3595. }
  3596. qdf_atomic_set(&vdev->ul_delay_report, enable);
  3597. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3598. return QDF_STATUS_SUCCESS;
  3599. }
  3600. QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3601. uint32_t *val)
  3602. {
  3603. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3604. struct dp_vdev *vdev;
  3605. uint32_t delay_accum;
  3606. uint32_t pkts_accum;
  3607. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
  3608. if (!vdev) {
  3609. dp_err_rl("vdev %d does not exist", vdev_id);
  3610. return QDF_STATUS_E_FAILURE;
  3611. }
  3612. if (!qdf_atomic_read(&vdev->ul_delay_report)) {
  3613. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3614. return QDF_STATUS_E_FAILURE;
  3615. }
  3616. /* Average uplink delay based on current accumulated values */
  3617. delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
  3618. pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
3619. *val = pkts_accum ? (delay_accum / pkts_accum) : 0;
  3620. dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
  3621. delay_accum, pkts_accum);
  3622. /* Reset accumulated values to 0 */
  3623. qdf_atomic_set(&vdev->ul_delay_accum, 0);
  3624. qdf_atomic_set(&vdev->ul_pkts_accum, 0);
  3625. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3626. return QDF_STATUS_SUCCESS;
  3627. }
  3628. static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  3629. struct hal_tx_completion_status *ts)
  3630. {
  3631. uint32_t buffer_ts;
  3632. uint32_t delta_tsf;
  3633. uint32_t ul_delay;
3634. /* If tx_rate_stats_info_valid is 0, the tsf is invalid; nothing to do */
  3635. if (!ts->valid)
  3636. return;
  3637. if (qdf_unlikely(!vdev)) {
3638. dp_info_rl("vdev is null or delete in progress");
  3639. return;
  3640. }
  3641. if (!qdf_atomic_read(&vdev->ul_delay_report))
  3642. return;
  3643. delta_tsf = vdev->delta_tsf;
  3644. /* buffer_timestamp is in units of 1024 us and is [31:13] of
  3645. * WBM_RELEASE_RING_4. After left shift 10 bits, it's
  3646. * valid up to 29 bits.
  3647. */
  3648. buffer_ts = ts->buffer_timestamp << 10;
  3649. ul_delay = ts->tsf - buffer_ts - delta_tsf;
  3650. ul_delay &= 0x1FFFFFFF; /* mask 29 BITS */
  3651. if (ul_delay > 0x1000000) {
  3652. dp_info_rl("----------------------\n"
  3653. "Tx completion status:\n"
  3654. "----------------------\n"
  3655. "release_src = %d\n"
  3656. "ppdu_id = 0x%x\n"
  3657. "release_reason = %d\n"
  3658. "tsf = %u (0x%x)\n"
  3659. "buffer_timestamp = %u (0x%x)\n"
  3660. "delta_tsf = %u (0x%x)\n",
  3661. ts->release_src, ts->ppdu_id, ts->status,
  3662. ts->tsf, ts->tsf, ts->buffer_timestamp,
  3663. ts->buffer_timestamp, delta_tsf, delta_tsf);
  3664. return;
  3665. }
  3666. ul_delay /= 1000; /* in unit of ms */
  3667. qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
  3668. qdf_atomic_inc(&vdev->ul_pkts_accum);
  3669. }
  3670. #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
  3671. static inline
  3672. void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  3673. struct hal_tx_completion_status *ts)
  3674. {
  3675. }
  3676. #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
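/*
 * Worked example, illustrative only, of the uplink delay arithmetic in
 * dp_tx_update_uplink_delay() with made-up values:
 *   buffer_timestamp = 5000 (units of 1024 us)
 *     -> buffer_ts = 5000 << 10 = 5120000 us
 *   tsf = 5200000 us, delta_tsf = 10000 us
 *     -> ul_delay = 5200000 - 5120000 - 10000 = 70000 us = 70 ms
 * The "& 0x1FFFFFFF" mask keeps the subtraction correct modulo 2^29
 * when tsf has wrapped past the 29 valid bits of the shifted timestamp.
 */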
  3677. /**
  3678. * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
  3679. * @soc: DP soc handle
  3680. * @tx_desc: software descriptor head pointer
  3681. * @ts: Tx completion status
  3682. * @peer: peer handle
  3683. * @ring_id: ring number
  3684. *
  3685. * Return: none
  3686. */
  3687. void dp_tx_comp_process_tx_status(struct dp_soc *soc,
  3688. struct dp_tx_desc_s *tx_desc,
  3689. struct hal_tx_completion_status *ts,
  3690. struct dp_peer *peer, uint8_t ring_id)
  3691. {
  3692. uint32_t length;
  3693. qdf_ether_header_t *eh;
  3694. struct dp_vdev *vdev = NULL;
  3695. qdf_nbuf_t nbuf = tx_desc->nbuf;
  3696. enum qdf_dp_tx_rx_status dp_status;
  3697. if (!nbuf) {
  3698. dp_info_rl("invalid tx descriptor. nbuf NULL");
  3699. goto out;
  3700. }
  3701. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  3702. length = qdf_nbuf_len(nbuf);
  3703. dp_status = dp_tx_hw_to_qdf(ts->status);
  3704. DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
  3705. QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
  3706. QDF_TRACE_DEFAULT_PDEV_ID,
  3707. qdf_nbuf_data_addr(nbuf),
  3708. sizeof(qdf_nbuf_data(nbuf)),
  3709. tx_desc->id, ts->status, dp_status));
  3710. dp_tx_comp_debug("-------------------- \n"
  3711. "Tx Completion Stats: \n"
  3712. "-------------------- \n"
  3713. "ack_frame_rssi = %d \n"
  3714. "first_msdu = %d \n"
  3715. "last_msdu = %d \n"
  3716. "msdu_part_of_amsdu = %d \n"
  3717. "rate_stats valid = %d \n"
  3718. "bw = %d \n"
  3719. "pkt_type = %d \n"
  3720. "stbc = %d \n"
  3721. "ldpc = %d \n"
  3722. "sgi = %d \n"
  3723. "mcs = %d \n"
  3724. "ofdma = %d \n"
  3725. "tones_in_ru = %d \n"
  3726. "tsf = %d \n"
  3727. "ppdu_id = %d \n"
  3728. "transmit_cnt = %d \n"
  3729. "tid = %d \n"
  3730. "peer_id = %d\n",
  3731. ts->ack_frame_rssi, ts->first_msdu,
  3732. ts->last_msdu, ts->msdu_part_of_amsdu,
  3733. ts->valid, ts->bw, ts->pkt_type, ts->stbc,
  3734. ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
  3735. ts->tones_in_ru, ts->tsf, ts->ppdu_id,
  3736. ts->transmit_cnt, ts->tid, ts->peer_id);
  3737. /* Update SoC level stats */
  3738. DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
  3739. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  3740. if (!peer) {
  3741. dp_info_rl("peer is null or deletion in progress");
  3742. DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
  3743. goto out;
  3744. }
  3745. vdev = peer->vdev;
  3746. dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
  3747. dp_tx_update_uplink_delay(soc, vdev, ts);
  3748. /* Update per-packet stats for mesh mode */
  3749. if (qdf_unlikely(vdev->mesh_vdev) &&
  3750. !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
  3751. dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
  3752. /* Update peer level stats */
  3753. if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
  3754. if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
  3755. DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
  3756. if ((peer->vdev->tx_encap_type ==
  3757. htt_cmn_pkt_type_ethernet) &&
  3758. QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  3759. DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
  3760. }
  3761. }
  3762. } else {
  3763. DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
  3764. if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
  3765. DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
  3766. if (qdf_unlikely(peer->in_twt)) {
  3767. DP_STATS_INC_PKT(peer,
  3768. tx.tx_success_twt,
  3769. 1, length);
  3770. }
  3771. }
  3772. }
  3773. dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
  3774. dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
  3775. #ifdef QCA_SUPPORT_RDK_STATS
  3776. if (soc->rdkstats_enabled)
  3777. dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
  3778. tx_desc->timestamp,
  3779. ts->ppdu_id);
  3780. #endif
  3781. out:
  3782. return;
  3783. }
  3784. #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
  3785. defined(QCA_ENHANCED_STATS_SUPPORT)
  3786. /*
  3787. * dp_tx_update_peer_basic_stats(): Update peer basic stats
  3788. * @peer: Datapath peer handle
  3789. * @length: Length of the packet
  3790. * @tx_status: Tx status from TQM/FW
  3791. * @update: enhanced flag value present in dp_pdev
  3792. *
  3793. * Return: none
  3794. */
  3795. void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
  3796. uint8_t tx_status, bool update)
  3797. {
  3798. if ((!peer->hw_txrx_stats_en) || update) {
  3799. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
  3800. DP_STATS_INCC(peer, tx.tx_failed, 1,
  3801. tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
  3802. }
  3803. }
  3804. #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
  3805. void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
  3806. uint8_t tx_status, bool update)
  3807. {
  3808. if (!peer->hw_txrx_stats_en) {
  3809. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
  3810. DP_STATS_INCC(peer, tx.tx_failed, 1,
  3811. tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
  3812. }
  3813. }
  3814. #else
  3815. void dp_tx_update_peer_basic_stats(struct dp_peer *peer, uint32_t length,
  3816. uint8_t tx_status, bool update)
  3817. {
  3818. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
  3819. DP_STATS_INCC(peer, tx.tx_failed, 1,
  3820. tx_status != HAL_TX_TQM_RR_FRAME_ACKED);
  3821. }
  3822. #endif
  3823. /*
  3824. * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
3825. * @next: pointer to the next Tx software descriptor
  3826. *
  3827. * Return: none
  3828. */
  3829. #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
  3830. static inline
  3831. void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
  3832. {
  3833. qdf_nbuf_t nbuf = NULL;
  3834. if (next)
  3835. nbuf = next->nbuf;
  3836. if (nbuf) {
  3837. /* prefetch skb->next and first few bytes of skb->cb */
  3838. qdf_prefetch(nbuf);
  3839. /* prefetch skb fields present in different cachelines */
  3840. qdf_prefetch(&nbuf->len);
  3841. qdf_prefetch(&nbuf->users);
  3842. }
  3843. }
  3844. #else
  3845. static inline
  3846. void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
  3847. {
  3848. }
  3849. #endif
  3850. /**
  3851. * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
  3852. * @soc: core txrx main context
  3853. * @comp_head: software descriptor head pointer
  3854. * @ring_id: ring number
  3855. *
  3856. * This function will process batch of descriptors reaped by dp_tx_comp_handler
  3857. * and release the software descriptors after processing is complete
  3858. *
  3859. * Return: none
  3860. */
  3861. static void
  3862. dp_tx_comp_process_desc_list(struct dp_soc *soc,
  3863. struct dp_tx_desc_s *comp_head, uint8_t ring_id)
  3864. {
  3865. struct dp_tx_desc_s *desc;
  3866. struct dp_tx_desc_s *next;
  3867. struct hal_tx_completion_status ts;
  3868. struct dp_peer *peer = NULL;
  3869. uint16_t peer_id = DP_INVALID_PEER;
  3870. qdf_nbuf_t netbuf;
  3871. desc = comp_head;
  3872. while (desc) {
  3873. next = desc->next;
  3874. dp_tx_prefetch_next_nbuf_data(next);
  3875. if (peer_id != desc->peer_id) {
  3876. if (peer)
  3877. dp_peer_unref_delete(peer,
  3878. DP_MOD_ID_TX_COMP);
  3879. peer_id = desc->peer_id;
  3880. peer = dp_peer_get_ref_by_id(soc, peer_id,
  3881. DP_MOD_ID_TX_COMP);
  3882. }
  3883. if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
  3884. struct dp_pdev *pdev = desc->pdev;
  3885. if (qdf_likely(peer))
  3886. dp_tx_update_peer_basic_stats(peer,
  3887. desc->length,
  3888. desc->tx_status,
  3889. false);
  3890. qdf_assert(pdev);
  3891. dp_tx_outstanding_dec(pdev);
  3892. /*
3893. * Calling a QDF wrapper here creates a significant
3894. * performance impact, so the wrapper call is avoided here
  3895. */
  3896. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  3897. desc->id, DP_TX_COMP_UNMAP);
  3898. qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
  3899. desc->nbuf,
  3900. desc->dma_addr,
  3901. QDF_DMA_TO_DEVICE,
  3902. desc->length);
  3903. qdf_nbuf_free(desc->nbuf);
  3904. dp_tx_desc_free(soc, desc, desc->pool_id);
  3905. desc = next;
  3906. continue;
  3907. }
  3908. hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
  3909. dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
  3910. netbuf = desc->nbuf;
  3911. /* check tx complete notification */
  3912. if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
  3913. dp_tx_notify_completion(soc, peer->vdev, desc,
  3914. netbuf, ts.status);
  3915. dp_tx_comp_process_desc(soc, desc, &ts, peer);
  3916. dp_tx_desc_release(desc, desc->pool_id);
  3917. desc = next;
  3918. }
  3919. if (peer)
  3920. dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
  3921. }
  3922. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  3923. static inline
  3924. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  3925. int max_reap_limit)
  3926. {
  3927. bool limit_hit = false;
  3928. limit_hit =
  3929. (num_reaped >= max_reap_limit) ? true : false;
  3930. if (limit_hit)
  3931. DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
  3932. return limit_hit;
  3933. }
  3934. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  3935. {
  3936. return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
  3937. }
  3938. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  3939. {
  3940. struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
  3941. return cfg->tx_comp_loop_pkt_limit;
  3942. }
  3943. #else
  3944. static inline
  3945. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  3946. int max_reap_limit)
  3947. {
  3948. return false;
  3949. }
  3950. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  3951. {
  3952. return false;
  3953. }
  3954. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  3955. {
  3956. return 0;
  3957. }
  3958. #endif
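/*
 * Minimal sketch, illustration only, of how the loop-packet-limit
 * helpers above bound a reap loop; dp_tx_comp_handler() below is the
 * real user. The function name and the bare quota loop are assumptions
 * made for this example.
 */
static inline int dp_tx_example_bounded_reap(struct dp_soc *soc,
					     uint32_t quota,
					     int max_reap_limit)
{
	int count = 0;

	while ((uint32_t)count < quota) {
		/* reap one completion descriptor here */
		count++;
		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
			break;
	}

	return count;
}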
  3959. #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
  3960. static inline int
  3961. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  3962. int *max_reap_limit)
  3963. {
  3964. return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
  3965. max_reap_limit);
  3966. }
  3967. #else
  3968. static inline int
  3969. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  3970. int *max_reap_limit)
  3971. {
  3972. return 0;
  3973. }
  3974. #endif
  3975. uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
  3976. hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
  3977. uint32_t quota)
  3978. {
  3979. void *tx_comp_hal_desc;
  3980. void *last_prefetched_hw_desc = NULL;
  3981. struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
  3982. hal_soc_handle_t hal_soc;
  3983. uint8_t buffer_src;
  3984. struct dp_tx_desc_s *tx_desc = NULL;
  3985. struct dp_tx_desc_s *head_desc = NULL;
  3986. struct dp_tx_desc_s *tail_desc = NULL;
  3987. uint32_t num_processed = 0;
  3988. uint32_t count;
  3989. uint32_t num_avail_for_reap = 0;
  3990. bool force_break = false;
  3991. struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
  3992. int max_reap_limit, ring_near_full;
  3993. DP_HIST_INIT();
  3994. more_data:
  3995. hal_soc = soc->hal_soc;
  3996. /* Re-initialize local variables to be re-used */
  3997. head_desc = NULL;
  3998. tail_desc = NULL;
  3999. count = 0;
  4000. max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
  4001. ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
  4002. &max_reap_limit);
  4003. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  4004. dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
  4005. return 0;
  4006. }
  4007. num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
  4008. if (num_avail_for_reap >= quota)
  4009. num_avail_for_reap = quota;
  4010. dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
  4011. last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
  4012. num_avail_for_reap);
  4013. /* Find head descriptor from completion ring */
  4014. while (qdf_likely(num_avail_for_reap--)) {
  4015. tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
  4016. if (qdf_unlikely(!tx_comp_hal_desc))
  4017. break;
  4018. buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
  4019. tx_comp_hal_desc);
4020. /* If this buffer was not released by TQM or FW, then it is not
4021. * a Tx completion indication; assert */
  4022. if (qdf_unlikely(buffer_src !=
  4023. HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
  4024. (qdf_unlikely(buffer_src !=
  4025. HAL_TX_COMP_RELEASE_SOURCE_FW))) {
  4026. uint8_t wbm_internal_error;
  4027. dp_err_rl(
  4028. "Tx comp release_src != TQM | FW but from %d",
  4029. buffer_src);
  4030. hal_dump_comp_desc(tx_comp_hal_desc);
  4031. DP_STATS_INC(soc, tx.invalid_release_source, 1);
  4032. /* When WBM sees NULL buffer_addr_info in any of
  4033. * ingress rings it sends an error indication,
  4034. * with wbm_internal_error=1, to a specific ring.
  4035. * The WBM2SW ring used to indicate these errors is
  4036. * fixed in HW, and that ring is being used as Tx
  4037. * completion ring. These errors are not related to
  4038. * Tx completions, and should just be ignored
  4039. */
  4040. wbm_internal_error = hal_get_wbm_internal_error(
  4041. hal_soc,
  4042. tx_comp_hal_desc);
  4043. if (wbm_internal_error) {
  4044. dp_err_rl("Tx comp wbm_internal_error!!");
  4045. DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
  4046. if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
  4047. buffer_src)
  4048. dp_handle_wbm_internal_error(
  4049. soc,
  4050. tx_comp_hal_desc,
  4051. hal_tx_comp_get_buffer_type(
  4052. tx_comp_hal_desc));
  4053. } else {
  4054. dp_err_rl("Tx comp wbm_internal_error false");
  4055. DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
  4056. }
  4057. continue;
  4058. }
  4059. soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
  4060. tx_comp_hal_desc,
  4061. &tx_desc);
  4062. if (!tx_desc) {
  4063. dp_err("unable to retrieve tx_desc!");
  4064. QDF_BUG(0);
  4065. continue;
  4066. }
  4067. tx_desc->buffer_src = buffer_src;
  4068. /*
  4069. * If the release source is FW, process the HTT status
  4070. */
  4071. if (qdf_unlikely(buffer_src ==
  4072. HAL_TX_COMP_RELEASE_SOURCE_FW)) {
  4073. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  4074. hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
  4075. htt_tx_status);
  4076. soc->arch_ops.dp_tx_process_htt_completion(
  4077. soc,
  4078. tx_desc,
  4079. htt_tx_status,
  4080. ring_id);
  4081. } else {
  4082. tx_desc->tx_status =
  4083. hal_tx_comp_get_tx_status(tx_comp_hal_desc);
  4084. tx_desc->buffer_src = buffer_src;
  4085. /*
4086. * If the fast completion mode is enabled, extended
4087. * metadata from the descriptor is not copied
  4088. */
  4089. if (qdf_likely(tx_desc->flags &
  4090. DP_TX_DESC_FLAG_SIMPLE))
  4091. goto add_to_pool;
  4092. /*
  4093. * If the descriptor is already freed in vdev_detach,
  4094. * continue to next descriptor
  4095. */
  4096. if (qdf_unlikely
  4097. ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
  4098. !tx_desc->flags)) {
  4099. dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
  4100. tx_desc->id);
  4101. DP_STATS_INC(soc, tx.tx_comp_exception, 1);
  4102. continue;
  4103. }
  4104. if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
  4105. dp_tx_comp_info_rl("pdev in down state %d",
  4106. tx_desc->id);
  4107. tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
  4108. dp_tx_comp_free_buf(soc, tx_desc);
  4109. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  4110. goto next_desc;
  4111. }
  4112. if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
  4113. !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
  4114. dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
  4115. tx_desc->flags, tx_desc->id);
  4116. qdf_assert_always(0);
  4117. }
  4118. /* Collect hw completion contents */
  4119. hal_tx_comp_desc_sync(tx_comp_hal_desc,
  4120. &tx_desc->comp, 1);
  4121. add_to_pool:
  4122. DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
  4123. /* First ring descriptor on the cycle */
  4124. if (!head_desc) {
  4125. head_desc = tx_desc;
  4126. tail_desc = tx_desc;
  4127. }
  4128. tail_desc->next = tx_desc;
  4129. tx_desc->next = NULL;
  4130. tail_desc = tx_desc;
  4131. }
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * Stop processing once the processed packet count exceeds
		 * the given quota
		 */
		count++;

		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
					       num_avail_for_reap,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
			break;
	}
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios:
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near the high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full)
		goto more_data;

	if (dp_tx_comp_enable_eol_data_check(soc)) {
		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
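/*
 * Illustrative sketch (not part of the driver): the reap loop above
 * batches completions by chaining each tx_desc into a singly linked
 * list through head_desc/tail_desc, then hands the whole chain to
 * dp_tx_comp_process_desc_list() once ring access has ended. The
 * list-building step in isolation, with a hypothetical node type:
 */
#if 0
struct ex_node {
	struct ex_node *next;
};

static void ex_list_append(struct ex_node **head, struct ex_node **tail,
			   struct ex_node *elem)
{
	if (!*head) {
		/* First element reaped on this cycle */
		*head = elem;
		*tail = elem;
	}
	/*
	 * For the first element this briefly self-links; the next two
	 * statements then terminate the list - same statement order as
	 * the loop above.
	 */
	(*tail)->next = elem;
	elem->next = NULL;
	*tail = elem;
}
#endif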
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif
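/*
 * Illustrative usage (sketch, caller names hypothetical): a TDLS
 * component that must retain nbuf ownership passes OL_TX_SPEC_NO_FREE,
 * which sets vdev->is_tdls_frame so the completion path will not free
 * the frame. dp_tx_non_std() returns the unconsumed MSDU list, so a
 * non-NULL return means the caller still owns those buffers.
 */
#if 0
static void ex_tdls_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ret;

	ret = dp_tx_non_std(soc_hdl, vdev_id, OL_TX_SPEC_NO_FREE, nbuf);
	if (ret)
		qdf_nbuf_free(ret);	/* not consumed by the DP layer */
}
#endif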
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;

	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				    DP_TCL_METADATA_TYPE_VDEV_BASED);

	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
				       vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}
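/*
 * Illustrative sketch: dp_tx_vdev_attach() packs per-vdev routing
 * hints into vdev->htt_tcl_metadata through the
 * DP_TX_TCL_METADATA_*_SET() macros. Conceptually this is plain
 * bitfield packing into one metadata word; the shift values below are
 * hypothetical - the real field layout is target-specific and is
 * hidden behind the macros.
 */
#if 0
#define EX_META_TYPE_SHIFT	0	/* hypothetical position */
#define EX_META_VDEV_ID_SHIFT	2	/* hypothetical position */
#define EX_META_PDEV_ID_SHIFT	10	/* hypothetical position */

static uint16_t ex_pack_tcl_metadata(uint16_t type, uint16_t vdev_id,
				     uint16_t pdev_id)
{
	return (type << EX_META_TYPE_SHIFT) |
	       (vdev_id << EX_META_VDEV_ID_SHIFT) |
	       (pdev_id << EX_META_PDEV_ID_SHIFT);
}
#endif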
#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif
/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	if (vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = soc->sta_mode_search_policy;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
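/*
 * Summary of the selection above (derived from the code, for quick
 * reference):
 *
 *   opmode           TDLS link    hal_desc_addr_search_flags
 *   ---------------  -----------  --------------------------------
 *   STA              connected    ADDRX_EN | ADDRY_EN (SA and DA)
 *   STA (non-WDS)    not present  ADDRY_EN (SA based search)
 *   all other modes  -            ADDRX_EN (DA based search)
 *
 * search_type additionally follows soc->sta_mode_search_policy in STA
 * mode and HAL_TX_ADDR_SEARCH_DEFAULT otherwise.
 */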
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, check only whether the desc's vdev
	 * matches. If vdev is NULL, check whether the desc's pdev
	 * matches instead.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 *                      to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 *        NULL: no specific Vdev is required; check all allocated TX descs
 *              on this pdev.
 *        Non-NULL: only check the allocated TX Descs associated to this Vdev.
 *
 * @force_free:
 *        true: flush the TX desc.
 *        false: only reset the Vdev in each allocated TX desc
 *               that is associated to the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset the Vdev to NULL in the associated
 * TX Desc.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Add flow pool lock protection in case the pool is freed
		 * when all tx_descs are recycled while handling TX
		 * completions. This is not necessary for a force flush, as:
		 * a. a double lock would occur if dp_tx_desc_release is
		 *    also trying to acquire it.
		 * b. the dp interrupt has been disabled before the force
		 *    TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free the TX desc if a force free is
				 * required; otherwise only reset the vdev
				 * in this TX desc.
				 */
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
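/*
 * Worked example of the paged-pool indexing above: descriptor j lives
 * at page (j / num_desc_per_page), slot (j % num_desc_per_page), which
 * dp_tx_desc_find() resolves to a struct dp_tx_desc_s pointer. For
 * instance, with 128 descriptors per page, j = 300 resolves to
 * page_id = 2, offset = 44.
 */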
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}
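/*
 * Illustrative usage (sketch): the two flush modes as used by the
 * driver's own paths - vdev detach resets ownership only, while a
 * pdev teardown (with dp interrupts already disabled) force-frees.
 */
#if 0
	/* vdev detach: keep the descriptors, just orphan them */
	dp_tx_desc_flush(pdev, vdev, false);

	/* pdev deinit: free every outstanding descriptor on the pdev */
	dp_tx_desc_flush(pdev, NULL, true);
#endif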
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* !QCA_LL_TX_FLOW_CONTROL_V2 */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}

	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
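/*
 * Illustrative sketch (hypothetical wrapper): the helpers above pair
 * up as alloc/init at attach time and deinit/free at detach time, in
 * mirror order - the same ordering the
 * dp_soc_tx_desc_sw_pools_{alloc,init,deinit,free}() functions below
 * follow for all pool types.
 */
#if 0
static void ex_static_pool_lifecycle(struct dp_soc *soc, int num_pool,
				     int num_desc)
{
	dp_tx_alloc_static_pools(soc, num_pool, num_desc);
	dp_tx_init_static_pools(soc, num_pool, num_desc);
	/* ... runtime ... */
	dp_tx_deinit_static_pools(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}
#endif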
/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. Extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. Extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for the following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
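/*
 * The fail3/fail2/fail1 labels above implement the usual kernel-style
 * reverse-order unwind: each label releases only what was successfully
 * set up before the failing step, then falls through to the labels
 * below it. A minimal standalone sketch of the pattern (all names
 * hypothetical):
 */
#if 0
static int ex_setup(void)
{
	if (ex_step_a())	/* hypothetical step */
		goto fail_a;
	if (ex_step_b())	/* hypothetical step */
		goto fail_b;
	return 0;

fail_b:
	ex_undo_a();		/* undo in reverse order */
fail_a:
	return -1;
}
#endif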
/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
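/*
 * Sketch of the deferred-TSO flow implied above: when
 * wlan_cfg_is_tso_desc_attach_defer() is set,
 * dp_soc_tx_desc_sw_pools_alloc()/init() skip the TSO pools, and
 * dp_tso_soc_attach() performs both the alloc and init steps later.
 * Conceptually:
 *
 *   if (!wlan_cfg_is_tso_desc_attach_defer(cfg))
 *           TSO pools come up together with the other tx pools;
 *   else
 *           dp_tso_soc_attach(txrx_soc);  // deferred, on demand
 *
 * The exact trigger point for the deferred call lives outside this
 * file.
 */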
/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}