dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "cdp_txrx_cmn_reg.h"
#ifdef CONFIG_SAWF
#include <dp_sawf.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

#define DP_RETRY_COUNT 7

#ifdef WLAN_PEER_JITTER
#define DP_AVG_JITTER_WEIGHT_DENOM 4
#define DP_AVG_DELAY_WEIGHT_DENOM 3
#endif

#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif

#define DP_GET_HW_LINK_ID_FRM_PPDU_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
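
/*
 * Illustrative example of the macro above (hypothetical field layout, not
 * taken from the HTT spec): with LINK_ID_OFFSET = 8 and LINK_ID_BITS = 3,
 * a PPDU id of 0x500 yields
 *
 *	DP_GET_HW_LINK_ID_FRM_PPDU_ID(0x500, 8, 3)
 *		= ((0x500 >> 8) & 0x7) = 5
 */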
/* mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					  HAL_TX_ENCRYPT_TYPE_WEP_128,
					  HAL_TX_ENCRYPT_TYPE_WEP_104,
					  HAL_TX_ENCRYPT_TYPE_WEP_40,
					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					  HAL_TX_ENCRYPT_TYPE_WAPI,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

qdf_export_symbol(sec_type_map);
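
/*
 * Usage sketch: the table is indexed by a cdp_sec_type value, e.g.
 *
 *	uint8_t hal_sec = sec_type_map[cdp_sec_type_tkip];
 *
 * which, given the ordering above, resolves to
 * HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC (enum name assumed from cdp_sec_type;
 * verify against the cdp headers).
 */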
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	enum dp_tx_event_type type;

	if (flags & DP_TX_DESC_FLAG_FLUSH)
		type = DP_TX_DESC_FLUSH;
	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
		type = DP_TX_COMP_UNMAP_ERR;
	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
		type = DP_TX_COMP_UNMAP;
	else
		type = DP_TX_DESC_UNMAP;

	return type;
}
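
/*
 * Note on the precedence implied by the if/else chain above: FLUSH wins
 * over TX_COMP_ERR, which wins over COMPLETED_TX, and a descriptor with
 * none of those flags is recorded as a plain DP_TX_DESC_UNMAP event.
 * For example, flags = DP_TX_DESC_FLAG_FLUSH | DP_TX_DESC_FLAG_COMPLETED_TX
 * maps to DP_TX_DESC_FLUSH.
 */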
static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
	struct dp_tx_desc_event *entry;
	uint32_t idx;
	uint16_t slot;

	switch (type) {
	case DP_TX_COMP_UNMAP:
	case DP_TX_COMP_UNMAP_ERR:
	case DP_TX_COMP_MSDU_EXT:
		if (qdf_unlikely(!tx_comp_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
						 &slot,
						 DP_TX_COMP_HIST_SLOT_SHIFT,
						 DP_TX_COMP_HIST_PER_SLOT_MAX,
						 DP_TX_COMP_HISTORY_SIZE);
		entry = &tx_comp_history->entry[slot][idx];
		break;
	case DP_TX_DESC_MAP:
	case DP_TX_DESC_UNMAP:
	case DP_TX_DESC_COOKIE:
	case DP_TX_DESC_FLUSH:
		if (qdf_unlikely(!tx_tcl_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
						 &slot,
						 DP_TX_TCL_HIST_SLOT_SHIFT,
						 DP_TX_TCL_HIST_PER_SLOT_MAX,
						 DP_TX_TCL_HISTORY_SIZE);
		entry = &tx_tcl_history->entry[slot][idx];
		break;
	default:
		dp_info_rl("Invalid dp_tx_event_type: %d", type);
		return;
	}

	entry->skb = skb;
	entry->paddr = paddr;
	entry->sw_cookie = sw_cookie;
	entry->type = type;
	entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
	int i;

	for (i = 1; i < tso_seg->seg.num_frags; i++) {
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
				       skb, sw_cookie, type);
	}

	if (!tso_seg->next)
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
				       skb, 0xFFFFFFFF, type);
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
	uint32_t num_segs = tso_info.num_segs;

	while (num_segs) {
		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
		curr_seg = curr_seg->next;
		num_segs--;
	}
}
#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
/**
 * dp_is_tput_high() - Check if throughput is high
 *
 * @soc: core txrx main context
 *
 * The current function is based on the RTPM tput policy variable, where
 * RTPM is avoided based on throughput.
 *
 * Return: non-zero if throughput is considered high
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
	return dp_get_rtpm_tput_policy_requirement(soc);
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;

		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}
/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
		dp_tx_err("TSO desc is NULL!");
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
		dp_tx_err("TSO num desc is NULL!");
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->
			msdu_ext_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->msdu_ext_desc->
					    tso_num_desc);
			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
				    tso_desc);
		tx_desc->msdu_ext_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif
void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->msdu_ext_desc) {
		if (tx_desc->frm_type == dp_tx_frm_tso)
			dp_tx_tso_desc_release(soc, tx_desc);

		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
			dp_tx_me_free_buf(tx_desc->pdev,
					  tx_desc->msdu_ext_desc->me_buffer);

		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
	}

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    tx_desc->buffer_src)
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
		    tx_desc->id, comp_status,
		    qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0]) ||
	    msdu_info->exception_fw) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does, so no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}

		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			dp_tx_err("Error in filling HTT metadata");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
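
/*
 * A worked example of the 8-byte alignment used above (illustrative size,
 * not the real sizeof): if sizeof(struct htt_tx_msdu_desc_ext2_t) were 36,
 * then htt_desc_size_aligned = (36 + 7) & ~0x7 = 40, so 40 bytes of
 * headroom are pushed and the 36-byte descriptor is copied into them.
 */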
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *)&tso_seg->tso_flags;
	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);
	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif
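
/*
 * Sketch of the qdf_dmaaddr_to_32s() split used above, assuming a 64-bit
 * qdf_dma_addr_t: the address is divided into two 32-bit words,
 *
 *	lo = addr & 0xffffffff;
 *	hi = addr >> 32;
 *
 * which the hardware descriptor stores as separate buffer-address fields.
 */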
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}
#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
		     % CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
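
/*
 * The modulo above makes the stats index a simple ring: with
 * CDP_MAX_TSO_PACKETS == N, successive calls return 1, 2, ..., N-1, 0,
 * 1, ... (qdf_atomic_inc_return() yields the post-increment value), so
 * stats are kept for at most N in-flight TSO packets at a time.
 */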
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
		tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso num segment descriptors (the list typically holds one) */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
				    qdf_nbuf_t msdu,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has been done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
		return QDF_STATUS_E_INVAL;
	}

	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
			      msdu, 0, DP_TX_DESC_MAP);

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
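
/*
 * A note on the allocation pattern above: each tso segment element is
 * pushed onto the head of tso_info->tso_seg_list, so the list is built
 * LIFO; qdf_nbuf_get_tso_info() then walks that list and fills one element
 * per segment, and curr_seg is reset to the list head before transmit.
 */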
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
				    qdf_nbuf_t msdu,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
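
/*
 * The compile-time assert above guards the qdf_mem_copy() calls that copy
 * msdu_info->meta_data into the HTT extension descriptor: the build fails
 * if the DWORD-sized meta_data scratch area
 * (DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 bytes) ever becomes smaller than
 * struct htt_tx_msdu_desc_ext2_t.
 */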
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to the prepared MSDU extension descriptor, or NULL on
 *         allocation failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
	struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;

		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}
		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 * @soc: datapath SOC
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	if (dp_is_tput_high(soc))
		return;

	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Return: 1 if the packet is marked as exception,
 *         0 if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return 0;
}
#endif
#ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
 *                                          as indication to fw to inform that
 *                                          data stream has ended
 * @vdev: DP vdev handle
 * @nbuf: original buffer from network stack
 *
 * Return: NULL on failure,
 *         nbuf on success
 */
static inline qdf_nbuf_t
dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
				     qdf_nbuf_t nbuf)
{
	/* Packet length should be enough to copy up to the L3 header */
	uint8_t end_nbuf_len = 64;
	uint8_t htt_desc_size_aligned;
	uint8_t htt_desc_size;
	qdf_nbuf_t end_nbuf;

	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
		if (!end_nbuf) {
			end_nbuf = qdf_nbuf_alloc(NULL,
						  (htt_desc_size_aligned +
						   end_nbuf_len),
						  htt_desc_size_aligned,
						  8, false);
			if (!end_nbuf) {
				dp_err("Packet allocation failed");
				goto out;
			}
		} else {
			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
		}
		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
			     end_nbuf_len);
		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
		return end_nbuf;
	}
out:
	return NULL;
}
/**
 * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
 *                                           via exception path.
 * @vdev: DP vdev handle
 * @end_nbuf: skb to send as indication
 * @msdu_info: msdu_info of original nbuf
 * @peer_id: peer id
 *
 * Return: None
 */
static inline void
dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
				      qdf_nbuf_t end_nbuf,
				      struct dp_tx_msdu_info_s *msdu_info,
				      uint16_t peer_id)
{
	struct dp_tx_msdu_info_s e_msdu_info = {0};
	qdf_nbuf_t nbuf;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);

	e_msdu_info.tx_queue = msdu_info->tx_queue;
	e_msdu_info.tid = msdu_info->tid;
	e_msdu_info.exception_fw = 1;
	desc_ext->host_tx_desc_pool = 1;
	desc_ext->traffic_end_indication = 1;

	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
				      peer_id, NULL);
	if (nbuf) {
		dp_err("Traffic end indication packet tx failed");
		qdf_nbuf_free(nbuf);
	}
}

/**
 * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
 *                                                mark it as a traffic end
 *                                                indication packet.
 * @tx_desc: Tx descriptor pointer
 * @msdu_info: msdu_info structure pointer
 *
 * Return: None
 */
static inline void
dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
					   struct dp_tx_msdu_info_s *msdu_info)
{
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);

	if (qdf_unlikely(desc_ext->traffic_end_indication))
		tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
}

/**
 * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
 *                                              freeing it if it is associated
 *                                              with a traffic-end-indication
 *                                              flagged descriptor.
 * @soc: dp soc handle
 * @desc: Tx descriptor pointer
 * @nbuf: buffer pointer
 *
 * Return: True if the packet gets enqueued, else false
 */
static bool
dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
					 struct dp_tx_desc_s *desc,
					 qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely((desc->flags &
			  DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
					     DP_MOD_ID_TX_COMP);
		if (vdev) {
			qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
			return true;
		}
	}
	return false;
}
  910. /**
  911. * dp_tx_traffic_end_indication_is_enabled() - get the feature
  912. * enable/disable status
  913. * @vdev: dp vdev handle
  914. *
  915. * Return: True if feature is enable else false
  916. */
  917. static inline bool
  918. dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
  919. {
  920. return qdf_unlikely(vdev->traffic_end_ind_en);
  921. }
static inline qdf_nbuf_t
dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			       struct dp_tx_msdu_info_s *msdu_info,
			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
{
	if (dp_tx_traffic_end_indication_is_enabled(vdev))
		end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);

	nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);

	if (qdf_unlikely(end_nbuf))
		dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
						      msdu_info, peer_id);
	return nbuf;
}
#else
static inline qdf_nbuf_t
dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
				     qdf_nbuf_t nbuf)
{
	return NULL;
}

static inline void
dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
				      qdf_nbuf_t end_nbuf,
				      struct dp_tx_msdu_info_s *msdu_info,
				      uint16_t peer_id)
{}

static inline void
dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
					   struct dp_tx_msdu_info_s *msdu_info)
{}

static inline bool
dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
					 struct dp_tx_desc_s *desc,
					 qdf_nbuf_t nbuf)
{
	return false;
}

static inline bool
dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
{
	return false;
}

static inline qdf_nbuf_t
dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			       struct dp_tx_msdu_info_s *msdu_info,
			       uint16_t peer_id, qdf_nbuf_t end_nbuf)
{
	return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
}
#endif

#if defined(QCA_SUPPORT_WDS_EXTENDED)
static bool
dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
			     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	if (soc->features.wds_ext_ast_override_enable &&
	    tx_exc_metadata && tx_exc_metadata->is_wds_extended)
		return true;

	return false;
}
#else
static bool
dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
			     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	return false;
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: Metadata to the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *	   NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev, nbuf))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;
	tx_desc->length = qdf_nbuf_headlen(nbuf);
	tx_desc->shinfo_addr = skb_end_pointer(nbuf);

	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	/* Packets marked by upper layer (OS-IF) to be sent to FW */
	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
		is_exception = 1;

	/* For BE chipsets, if WDS extension is enabled, do not mark the
	 * frame to FW in the descriptor; AST-index-based search is used
	 * to resolve the AST index instead.
	 */
	if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
		return tx_desc;

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which include
	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
	 * These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 * to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             | ^    in HW descriptor (aligned)
	 *  |        HTT Metadata         | |
	 *  |                             | |
	 *  |                             | | Packet Offset given in descriptor
	 *  |                             | |
	 *  |-----------------------------| |
	 *  |       Alignment Pad         | v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |         SKB Data            |        (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata &&
	     tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			dp_tx_err("qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;

		tx_desc->length = qdf_nbuf_headlen(nbuf);
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
							   msdu_info);
		is_exception = 1;
		tx_desc->length -= tx_desc->pkt_offset;
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
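
/*
 * Worked example of the exception-path layout above (illustrative): if
 * qdf_nbuf_data() ends in ...0x5, then align_pad = 0x5 & 0x7 = 5 bytes are
 * pushed first, the HTT metadata (a multiple of 8 bytes) follows, and the
 * HW is handed the aligned address with pkt_offset = align_pad +
 * htt_hdr_size. A minimal sketch of the arithmetic, with hypothetical
 * names and plain C only:
 *
 *   #include <stdint.h>
 *
 *   static inline uint32_t tx_pkt_offset(uintptr_t data_addr,
 *                                        uint32_t htt_hdr_size)
 *   {
 *           uint32_t align_pad = (uint32_t)(data_addr & 0x7);
 *
 *           return align_pad + htt_hdr_size;
 *   }
 */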
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id : Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *	   NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev, nbuf))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}
	dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
				  nbuf, tx_desc->id, DP_TX_DESC_COOKIE);

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		dp_tx_info("Tx Extension Descriptor Alloc Fail");
		goto failure;
	}

#if !TQM_BYPASS_WAR
	if (qdf_unlikely(msdu_info->exception_fw) ||
	    dp_tx_is_nbuf_marked_exception(soc, nbuf))
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	tx_desc->dma_addr = msdu_ext_desc->paddr;

	if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
	else
		tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;

	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* Continue only if frames are of DATA type */
	if (!DP_FRAME_IS_DATA(qos_wh)) {
		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
		dp_tx_debug("Pkt recd is not of data type");
		goto error;
	}
	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (vdev->raw_mode_war &&
	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
	     curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
		/*
		 * Number of nbuf's must not exceed the size of the frags
		 * array in seg_info.
		 */
		if (i >= DP_TX_MAX_NUM_FRAGS) {
			dp_err_rl("nbuf cnt exceeds the max number of segs");
			DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
			goto error;
		}
		if (QDF_STATUS_SUCCESS !=
			qdf_nbuf_map_nbytes_single(vdev->osdev,
						   curr_nbuf,
						   QDF_DMA_TO_DEVICE,
						   curr_nbuf->len)) {
			dp_tx_err("%s dma map error ", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			goto error;
		}
		/* Update the count of mapped nbuf's */
		mapped_buf_num++;
		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *) curr_nbuf;
		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
						     QDF_DMA_TO_DEVICE,
						     curr_nbuf->len);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;
}
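
/*
 * The frags[] entries above carry a DMA address split into two 32-bit
 * halves. A self-contained sketch of the split and its inverse, with
 * hypothetical helper names in plain C:
 *
 *   #include <stdint.h>
 *
 *   static inline void paddr_split(uint64_t paddr,
 *                                  uint32_t *lo, uint32_t *hi)
 *   {
 *           *lo = (uint32_t)paddr;
 *           *hi = (uint32_t)(paddr >> 32);
 *   }
 *
 *   static inline uint64_t paddr_join(uint32_t lo, uint32_t hi)
 *   {
 *           return ((uint64_t)hi << 32) | lo;
 *   }
 */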
/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
				    qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur_nbuf = nbuf;

	do {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
					     QDF_DMA_TO_DEVICE,
					     cur_nbuf->len);
		cur_nbuf = qdf_nbuf_next(cur_nbuf);
	} while (cur_nbuf);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
					       qdf_nbuf_t nbuf)
{
	qdf_nbuf_t nbuf_local;
	struct dp_vdev *vdev_local = vdev_hdl;

	do {
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
			break;
		nbuf_local = nbuf;
		if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
				 htt_cmn_pkt_type_raw))
			break;
		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
			break;
		else if (qdf_nbuf_is_tso((nbuf_local)))
			break;

		dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
						       (nbuf_local),
						       NULL, 1, 0);
	} while (0);
}
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
void dp_tx_update_stats(struct dp_soc *soc,
			struct dp_tx_desc_s *tx_desc,
			uint8_t ring_id)
{
	uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);

	DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
}

int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	struct dp_swlm *swlm = &soc->swlm;
	union swlm_data swlm_query_data;
	struct dp_swlm_tcl_data tcl_data;
	QDF_STATUS status;
	int ret;

	if (!swlm->is_enabled)
		return msdu_info->skip_hp_update;

	tcl_data.nbuf = tx_desc->nbuf;
	tcl_data.tid = tid;
	tcl_data.ring_id = ring_id;
	tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
	tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
	swlm_query_data.tcl_data = &tcl_data;

	status = dp_swlm_tcl_pre_check(soc, &tcl_data);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_swlm_tcl_reset_session_data(soc, ring_id);
		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
		return 0;
	}

	ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
	if (ret) {
		DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
	} else {
		DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
	}

	return ret;
}

void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	if (coalesce)
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
	else
		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}
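
/*
 * Usage sketch (illustrative glue, not driver code): a transmit path asks
 * the SW latency manager whether the ring head-pointer update can be
 * coalesced, then finishes ring access accordingly. A non-zero coalesce
 * result means only reap the ring; zero means flush the head pointer:
 *
 *   int coalesce;
 *
 *   coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *                                       msdu_info, ring_id);
 *   // ... write the TCL descriptor to the ring here ...
 *   dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
 */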
static inline void
dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
{
	if (((i + 1) < msdu_info->num_seg))
		msdu_info->skip_hp_update = 1;
	else
		msdu_info->skip_hp_update = 0;
}

static inline void
dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
{
	hal_ring_handle_t hal_ring_hdl =
		dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
		dp_err("Fillmore: SRNG access start failed");
		return;
	}

	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
}

static inline void
dp_tx_check_and_flush_hp(struct dp_soc *soc,
			 QDF_STATUS status,
			 struct dp_tx_msdu_info_s *msdu_info)
{
	if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
		dp_flush_tcp_hp(soc,
				(msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
	}
}
#else
static inline void
dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
{
}

static inline void
dp_tx_check_and_flush_hp(struct dp_soc *soc,
			 QDF_STATUS status,
			 struct dp_tx_msdu_info_s *msdu_info)
{
}
#endif
#ifdef FEATURE_RUNTIME_PM
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	int ret;

	/*
	 * Avoid runtime get and put APIs under high throughput scenarios.
	 */
	if (dp_get_rtpm_tput_policy_requirement(soc)) {
		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		return;
	}

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (QDF_IS_STATUS_SUCCESS(ret)) {
		if (hif_system_pm_state_check(soc->hif_handle)) {
			dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
	} else {
		dp_runtime_get(soc);
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		qdf_atomic_inc(&soc->tx_pending_rtpm);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
		dp_runtime_put(soc);
	}
}
#else

#ifdef DP_POWER_SAVE
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	if (hif_system_pm_state_check(soc->hif_handle)) {
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	} else {
		dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
	}
}
#endif
#endif
/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor
 *
 * Extract the DSCP or PCP information from the frame and map it into a TID
 * value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);
	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = (uint8_t *)(eh->ether_dhost);
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *) nbuf->data;

		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2 * QDF_MAC_ADDR_SIZE +
					 sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				  sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2 * QDF_MAC_ADDR_SIZE
						 + sizeof(*llcHdr) +
						 sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				  sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *) eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/* TODO
		 * use flowlabel
		 * igmpmld cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *) L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}

	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;

	return;
}
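
/*
 * Worked example of the DSCP mapping above (illustrative): the two ECN
 * bits are dropped and the remaining 6 DSCP bits index the per-vdev map,
 * e.g. TOS 0xB8 (DSCP EF) -> 0xB8 >> 2 = 0x2E = 46. Standalone sketch in
 * plain C, assuming a shift of 2 and a mask of 0x3f as used via
 * DP_IP_DSCP_SHIFT/DP_IP_DSCP_MASK:
 *
 *   #include <stdint.h>
 *
 *   static inline uint8_t tos_to_dscp(uint8_t tos)
 *   {
 *           return (tos >> 2) & 0x3f;
 *   }
 */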
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: msdu descriptor
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	/*
	 * skip_sw_tid_classification flag will be set in the below cases:
	 * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
	 * 2. hlos_tid_override enabled for vdev
	 * 3. mesh mode enabled for vdev
	 */
	if (qdf_likely(vdev->skip_sw_tid_classification)) {
		/* Update tid in msdu_info from skb priority */
		if (qdf_unlikely(vdev->skip_sw_tid_classification
				 & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
			uint32_t tid = qdf_nbuf_get_priority(nbuf);

			if (tid == DP_TX_INVALID_QOS_TAG)
				return;

			msdu_info->tid = tid;
			return;
		}
		return;
	}

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_soc *soc,
				    struct dp_vdev *vdev,
				    struct dp_tx_desc_s *tx_desc)
{
	if (vdev) {
		if (vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			vdev->is_tdls_frame = false;
		}
	}
}

static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
{
	uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;

	switch (soc->arch_id) {
	case CDP_ARCH_TYPE_LI:
		tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
		break;

	case CDP_ARCH_TYPE_BE:
		tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
		break;

	case CDP_ARCH_TYPE_RH:
	{
		uint32_t *msg_word = (uint32_t *)htt_desc;

		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(
							*(msg_word + 3));
	}
		break;
	default:
		dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
		QDF_BUG(0);
	}

	return tx_status;
}

/**
 * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
					     struct dp_tx_desc_s *tx_desc)
{
	uint8_t tx_status = 0;
	uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
						     DP_MOD_ID_TDLS);

	if (qdf_unlikely(!vdev)) {
		dp_err_rl("vdev is null!");
		goto error;
	}

	hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
	tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
	dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);

	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, tx_status);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
		return;
	} else {
		dp_err_rl("callback func is null");
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
error:
	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_tx_msdu_single_map() - do nbuf map
 * @vdev: DP vdev handle
 * @tx_desc: DP TX descriptor pointer
 * @nbuf: skb pointer
 *
 * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
 * operation done in other components.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
		return qdf_nbuf_map_nbytes_single(vdev->osdev,
						  nbuf,
						  QDF_DMA_TO_DEVICE,
						  nbuf->len);
	else
		return qdf_nbuf_map_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE);
}
#else
static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
						    struct dp_tx_desc_s *tx_desc)
{
}

static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	return qdf_nbuf_map_nbytes_single(vdev->osdev,
					  nbuf,
					  QDF_DMA_TO_DEVICE,
					  nbuf->len);
}
#endif
static inline
qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
				      struct dp_tx_desc_s *tx_desc,
				      qdf_nbuf_t nbuf)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
		return 0;

	return qdf_nbuf_mapped_paddr_get(nbuf);
}

static inline
void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
					   desc->nbuf,
					   desc->dma_addr,
					   QDF_DMA_TO_DEVICE,
					   desc->length);
}

#ifdef QCA_DP_TX_RMNET_OPTIMIZATION
static inline bool
is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
{
	struct net_device *ingress_dev;
	skb_frag_t *frag;
	uint16_t buf_len = 0;
	uint16_t linear_data_len = 0;
	uint8_t *payload_addr = NULL;

	ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);

	if (!ingress_dev)
		return false;

	if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
		dev_put(ingress_dev);
		frag = &(skb_shinfo(nbuf)->frags[0]);
		buf_len = skb_frag_size(frag);
		payload_addr = (uint8_t *)skb_frag_address(frag);
		linear_data_len = skb_headlen(nbuf);

		buf_len += linear_data_len;
		payload_addr = payload_addr - linear_data_len;
		memcpy(payload_addr, nbuf->data, linear_data_len);

		msdu_info->frm_type = dp_tx_frm_rmnet;
		msdu_info->buf_len = buf_len;
		msdu_info->payload_addr = payload_addr;

		return true;
	}
	dev_put(ingress_dev);
	return false;
}

static inline
qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
				    struct dp_tx_desc_s *tx_desc)
{
	qdf_dma_addr_t paddr;

	paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
	tx_desc->length = msdu_info->buf_len;

	qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
				 (void *)(msdu_info->payload_addr +
					  msdu_info->buf_len));

	tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
	return paddr;
}
#else
static inline bool
is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
{
	return false;
}

static inline
qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
				    struct dp_tx_desc_s *tx_desc)
{
	return 0;
}
#endif

#if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
			      struct dp_tx_desc_s *tx_desc,
			      qdf_nbuf_t nbuf)
{
	if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
		qdf_nbuf_dma_clean_range((void *)nbuf->data,
					 (void *)(nbuf->data + nbuf->len));
		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
	} else {
		return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
	}
}

static inline
void dp_tx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_tx_desc_s *desc)
{
	if (qdf_unlikely(!(desc->flags &
			   (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
		return dp_tx_nbuf_unmap_regular(soc, desc);
}
#else
static inline
qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
			      struct dp_tx_desc_s *tx_desc,
			      qdf_nbuf_t nbuf)
{
	return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
}

static inline
void dp_tx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_tx_desc_s *desc)
{
	return dp_tx_nbuf_unmap_regular(soc, desc);
}
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	dp_tx_nbuf_unmap(soc, desc);
	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
		dp_tx_nbuf_unmap(soc, desc);
}
#else
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	dp_tx_nbuf_unmap(soc, desc);
}
#endif
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
 * @soc: datapath SOC
 * @vdev: datapath vdev
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->mesh_vdev))
		tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
}

/**
 * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 * @delayed_free: delay the nbuf free
 *
 * Return: nbuf to be freed later
 */
static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
						   struct dp_tx_desc_s *tx_desc,
						   bool delayed_free)
{
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	struct dp_vdev *vdev = NULL;

	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
		if (vdev)
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);

		if (delayed_free)
			return nbuf;

		qdf_nbuf_free(nbuf);
	} else {
		if (vdev && vdev->osif_tx_free_ext) {
			vdev->osif_tx_free_ext((nbuf));
		} else {
			if (delayed_free)
				return nbuf;

			qdf_nbuf_free(nbuf);
		}
	}

	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);

	return NULL;
}
#else
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc)
{
}

static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
						   struct dp_tx_desc_s *tx_desc,
						   bool delayed_free)
{
	return NULL;
}
#endif
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
{
	struct dp_pdev *pdev = NULL;
	struct dp_ast_entry *src_ast_entry = NULL;
	struct dp_ast_entry *dst_ast_entry = NULL;
	struct dp_soc *soc = NULL;

	qdf_assert(vdev);
	pdev = vdev->pdev;
	qdf_assert(pdev);
	soc = pdev->soc;

	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, dstmac, vdev->pdev->pdev_id);

	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, srcmac, vdev->pdev->pdev_id);

	if (dst_ast_entry && src_ast_entry) {
		if (dst_ast_entry->peer_id ==
			src_ast_entry->peer_id)
			return 1;
	}

	return 0;
}
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
/* MLO peer id for reinject */
#define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
/* MLO vdev id inc offset */
#define DP_MLO_VDEV_ID_OFFSET 0x80

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline bool
dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	if (tx_exc_metadata && tx_exc_metadata->is_wds_extended)
		return true;

	return false;
}
#else
static inline bool
dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	return false;
}
#endif

static inline void
dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
			 struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	/* wds ext enabled will not set the TO_FW bit */
	if (dp_tx_wds_ext_check(tx_exc_metadata))
		return;

	if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}
}

static inline void
dp_tx_update_mcast_param(uint16_t peer_id,
			 uint16_t *htt_tcl_metadata,
			 struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info)
{
	if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
		*htt_tcl_metadata = 0;
		DP_TX_TCL_METADATA_TYPE_SET(
				*htt_tcl_metadata,
				HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
		HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
						    msdu_info->gsn);

		msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
		HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
						*htt_tcl_metadata, 1);
	} else {
		msdu_info->vdev_id = vdev->vdev_id;
	}
}
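
/*
 * Usage sketch (illustrative): for an MLO mcast reinject, the caller
 * passes DP_MLO_MCAST_REINJECT_PEER_ID so the TCL metadata is rebuilt to
 * carry the global sequence number instead of a peer id. msdu_info->gsn
 * is assumed to have been filled by the reinject path:
 *
 *   uint16_t htt_tcl_metadata = 0;
 *
 *   dp_tx_update_mcast_param(DP_MLO_MCAST_REINJECT_PEER_ID,
 *                            &htt_tcl_metadata, vdev, msdu_info);
 *   // htt_tcl_metadata is now global-seq based and host inspected,
 *   // and msdu_info->vdev_id is offset by DP_MLO_VDEV_ID_OFFSET.
 */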
#else
static inline void
dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
			 struct cdp_tx_exception_metadata *tx_exc_metadata)
{
}

static inline void
dp_tx_update_mcast_param(uint16_t peer_id,
			 uint16_t *htt_tcl_metadata,
			 struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info)
{
}
#endif

#ifdef DP_TX_SW_DROP_STATS_INC
static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
				 qdf_nbuf_t nbuf,
				 enum cdp_tx_sw_drop drop_code)
{
	/* EAPOL Drop stats */
	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
		switch (drop_code) {
		case TX_DESC_ERR:
			DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
			break;
		case TX_HAL_RING_ACCESS_ERR:
			DP_STATS_INC(pdev,
				     eap_drop_stats.tx_hal_ring_access_err, 1);
			break;
		case TX_DMA_MAP_ERR:
			DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
			break;
		case TX_HW_ENQUEUE:
			DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
			break;
		case TX_SW_ENQUEUE:
			DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
			break;
		default:
			dp_info_rl("Invalid eapol_drop code: %d", drop_code);
			break;
		}
	}
}
#else
static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
				 qdf_nbuf_t nbuf,
				 enum cdp_tx_sw_drop drop_code)
{
}
#endif
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	qdf_dma_addr_t paddr;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
			  vdev->vdev_id, vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	dp_tx_update_tdls_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					    DP_TCL_METADATA_TYPE_PEER_BASED);
		DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
					       peer_id);
		dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw)
		DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);

	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
					 !pdev->enhanced_stats_en);

	dp_tx_update_mesh_flags(soc, vdev, tx_desc);

	if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
		paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
	else
		paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf);

	if (!paddr) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	tx_desc->dma_addr = paddr;
	dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
			       tx_desc->id, DP_TX_DESC_MAP);
	dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
					     htt_tcl_metadata,
					     tx_exc_metadata, msdu_info);

	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
			     tx_desc, tx_q->ring_id);
		dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
				       tx_desc->id, DP_TX_DESC_UNMAP);
		dp_tx_nbuf_unmap(soc, tx_desc);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
	tid_stats = &pdev->stats.tid_stats.
		    tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}
/**
 * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: 0 if Success
 */
#ifdef FEATURE_WLAN_TDLS
static inline int
dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
		dp_non_std_htt_tx_comp_free_buff(soc, desc);
		return 0;
	}
	return 1;
}
#else
static inline int
dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	return 1;
}
#endif
qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
			       bool delayed_free)
{
	qdf_nbuf_t nbuf = desc->nbuf;
	enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return NULL;

	if (!dp_tdls_tx_comp_free_buff(soc, desc))
		return NULL;

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
					       desc->id, DP_TX_COMP_MSDU_EXT);
			dp_tx_tso_seg_history_add(soc,
						  desc->msdu_ext_desc->tso_desc,
						  desc->nbuf, desc->id, type);
			/* unmap each TSO seg before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc,
						desc->msdu_ext_desc->tso_desc,
						desc->msdu_ext_desc->
						tso_num_desc);
			goto nbuf_free;
		}

		if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
			void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
			qdf_dma_addr_t iova;
			uint32_t frag_len;
			uint32_t i;

			qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
						     QDF_DMA_TO_DEVICE,
						     qdf_nbuf_headlen(nbuf));

			for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
				hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
							      &iova,
							      &frag_len);
				if (!iova || !frag_len)
					break;

				qdf_mem_unmap_page(soc->osdev, iova, frag_len,
						   QDF_DMA_TO_DEVICE);
			}

			goto nbuf_free;
		}
	}
	/* If it's an ME frame, don't unmap the cloned nbuf's */
	if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
		goto nbuf_free;

	dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
	dp_tx_unmap(soc, desc);

	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
		return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);

	if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
		return NULL;

nbuf_free:
	if (delayed_free)
		return nbuf;

	qdf_nbuf_free(nbuf);

	return NULL;
}
/**
 * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
 * @soc: DP soc handle
 * @nbuf: skb
 * @msdu_info: MSDU info
 *
 * Return: None
 */
static inline void
dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
		   struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_idx;
	struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;

	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
				     qdf_nbuf_headlen(nbuf));

	for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
		qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
				   (seg->frags[cur_idx].paddr_lo | ((uint64_t)
				    seg->frags[cur_idx].paddr_hi) << 32),
				   seg->frags[cur_idx].len,
				   QDF_DMA_TO_DEVICE);
}

#if QDF_LOCK_STATS
noinline
#else
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	uint8_t prep_desc_fail = 0, hw_enq_fail = 0;

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				prep_desc_fail++;
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						 .curr_seg->frags[0].vaddr));
				if (prep_desc_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if descriptor
					 * preparation failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For ME, there are as many segments as there
				 * are clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				}
				i++;
				continue;
			}

			if (msdu_info->frm_type == dp_tx_frm_tso) {
				dp_tx_tso_seg_history_add(
						soc,
						msdu_info->u.tso_info.curr_seg,
						nbuf, 0, DP_TX_DESC_UNMAP);
				dp_tx_tso_unmap_segment(soc,
							msdu_info->u.tso_info.
							curr_seg,
							msdu_info->u.tso_info.
							tso_num_seg_list);

				if (msdu_info->u.tso_info.curr_seg->next) {
					msdu_info->u.tso_info.curr_seg =
						msdu_info->u.tso_info.curr_seg->next;
					i++;
					continue;
				}
			}

			if (msdu_info->frm_type == dp_tx_frm_sg)
				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);

			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->msdu_ext_desc->me_buffer =
				(struct dp_tx_me_buf_t *)msdu_info->
				u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		dp_tx_is_hp_update_required(i, msdu_info);

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
				 */
			}
		}

		dp_tx_update_mcast_param(DP_INVALID_PEER,
					 &htt_tcl_metadata,
					 vdev,
					 msdu_info);
		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
						     htt_tcl_metadata,
						     NULL, msdu_info);

		dp_tx_check_and_flush_hp(soc, status, msdu_info);

		if (status != QDF_STATUS_SUCCESS) {
			dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
				   tx_desc, tx_q->ring_id);
			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;

			if (msdu_info->frm_type == dp_tx_frm_me) {
				hw_enq_fail++;
				if (hw_enq_fail == msdu_info->num_seg) {
					/*
					 * Unmap is needed only if enqueue
					 * failed for all segments.
					 */
					qdf_nbuf_unmap(soc->osdev,
						       msdu_info->u.sg_info.
						       curr_seg->nbuf,
						       QDF_DMA_TO_DEVICE);
				}
				/*
				 * Free the nbuf for the current segment
				 * and make it point to the next in the list.
				 * For ME, there are as many segments as there
				 * are clients.
				 */
				qdf_nbuf_free(msdu_info->u.sg_info
					      .curr_seg->nbuf);
				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
				if (msdu_info->u.sg_info.curr_seg->next) {
					msdu_info->u.sg_info.curr_seg =
						msdu_info->u.sg_info
						.curr_seg->next;
					nbuf = msdu_info->u.sg_info
					       .curr_seg->nbuf;
				} else
					break;
				i++;
				continue;
			}

			/*
			 * For TSO frames, the nbuf users increment done for
			 * the current segment has to be reverted, since the
			 * hw enqueue for this segment failed
			 */
			if (msdu_info->frm_type == dp_tx_frm_tso &&
			    msdu_info->u.tso_info.curr_seg) {
				/*
				 * unmap and free current,
				 * retransmit remaining segments
				 */
				dp_tx_comp_free_buf(soc, tx_desc, false);
				i++;
				dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
				continue;
			}

			if (msdu_info->frm_type == dp_tx_frm_sg)
				dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			} else
				break;
		}
		i++;
	}

	nbuf = NULL;

done:
	return nbuf;
}
  2467. /**
  2468. * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
  2469. * for SG frames
  2470. * @vdev: DP vdev handle
  2471. * @nbuf: skb
  2472. * @seg_info: Pointer to Segment info Descriptor to be prepared
  2473. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2474. *
 * Return: nbuf on success,
 *	   NULL on failure
  2477. */
  2478. static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2479. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  2480. {
  2481. uint32_t cur_frag, nr_frags, i;
  2482. qdf_dma_addr_t paddr;
  2483. struct dp_tx_sg_info_s *sg_info;
  2484. sg_info = &msdu_info->u.sg_info;
  2485. nr_frags = qdf_nbuf_get_nr_frags(nbuf);
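	/*
	 * Map the linear (head) portion of the skb first; it is
	 * recorded as frag 0 of the segment descriptor below.
	 */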
  2486. if (QDF_STATUS_SUCCESS !=
  2487. qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
  2488. QDF_DMA_TO_DEVICE,
  2489. qdf_nbuf_headlen(nbuf))) {
  2490. dp_tx_err("dma map error");
  2491. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2492. qdf_nbuf_free(nbuf);
  2493. return NULL;
  2494. }
  2495. paddr = qdf_nbuf_mapped_paddr_get(nbuf);
  2496. seg_info->frags[0].paddr_lo = paddr;
  2497. seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
  2498. seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
  2499. seg_info->frags[0].vaddr = (void *) nbuf;
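	/*
	 * Map each paged fragment and record it at index cur_frag + 1,
	 * i.e. immediately after the head fragment at index 0.
	 */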
  2500. for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
  2501. if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
  2502. nbuf, 0,
  2503. QDF_DMA_TO_DEVICE,
  2504. cur_frag)) {
  2505. dp_tx_err("frag dma map error");
  2506. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2507. goto map_err;
  2508. }
  2509. paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
  2510. seg_info->frags[cur_frag + 1].paddr_lo = paddr;
  2511. seg_info->frags[cur_frag + 1].paddr_hi =
  2512. ((uint64_t) paddr) >> 32;
  2513. seg_info->frags[cur_frag + 1].len =
  2514. qdf_nbuf_get_frag_size(nbuf, cur_frag);
  2515. }
  2516. seg_info->frag_cnt = (cur_frag + 1);
  2517. seg_info->total_len = qdf_nbuf_len(nbuf);
  2518. seg_info->next = NULL;
  2519. sg_info->curr_seg = seg_info;
  2520. msdu_info->frm_type = dp_tx_frm_sg;
  2521. msdu_info->num_seg = 1;
  2522. return nbuf;
  2523. map_err:
  2524. /* restore paddr into nbuf before calling unmap */
  2525. qdf_nbuf_mapped_paddr_set(nbuf,
  2526. (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
  2527. ((uint64_t)
  2528. seg_info->frags[0].paddr_hi) << 32));
  2529. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  2530. QDF_DMA_TO_DEVICE,
  2531. seg_info->frags[0].len);
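	/*
	 * Unmap only the fragments mapped so far (entries 1..cur_frag);
	 * the fragment that failed was never recorded in seg_info.
	 */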
  2532. for (i = 1; i <= cur_frag; i++) {
  2533. qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
  2534. (seg_info->frags[i].paddr_lo | ((uint64_t)
  2535. seg_info->frags[i].paddr_hi) << 32),
  2536. seg_info->frags[i].len,
  2537. QDF_DMA_TO_DEVICE);
  2538. }
  2539. qdf_nbuf_free(nbuf);
  2540. return NULL;
  2541. }
  2542. /**
  2543. * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
  2544. * @vdev: DP vdev handle
  2545. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2546. * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
  2547. *
 * Return: None
  2550. */
  2551. static
  2552. void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
  2553. struct dp_tx_msdu_info_s *msdu_info,
  2554. uint16_t ppdu_cookie)
  2555. {
  2556. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2557. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2558. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
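	/*
	 * Mark the frame to be sent standalone and attach the caller's
	 * PPDU cookie so the target echoes it back in the PPDU completion.
	 */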
  2559. HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
  2560. (msdu_info->meta_data[5], 1);
  2561. HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
  2562. (msdu_info->meta_data[5], 1);
  2563. HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
  2564. (msdu_info->meta_data[6], ppdu_cookie);
  2565. msdu_info->exception_fw = 1;
  2566. msdu_info->is_tx_sniffer = 1;
  2567. }
  2568. #ifdef MESH_MODE_SUPPORT
  2569. /**
  2570. * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
  2571. * and prepare msdu_info for mesh frames.
  2572. * @vdev: DP vdev handle
  2573. * @nbuf: skb
  2574. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2575. *
  2576. * Return: NULL on failure,
  2577. * nbuf when extracted successfully
  2578. */
  2579. static
  2580. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2581. struct dp_tx_msdu_info_s *msdu_info)
  2582. {
  2583. struct meta_hdr_s *mhdr;
  2584. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2585. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2586. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2587. if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
  2588. msdu_info->exception_fw = 0;
  2589. goto remove_meta_hdr;
  2590. }
  2591. msdu_info->exception_fw = 1;
  2592. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2593. meta_data->host_tx_desc_pool = 1;
  2594. meta_data->update_peer_cache = 1;
  2595. meta_data->learning_frame = 1;
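	/*
	 * When auto-rate is disabled in the meta header, program the
	 * fixed rate parameters (power, MCS, NSS, preamble, retries,
	 * bandwidth) and mark each of them valid for the target.
	 */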
  2596. if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
  2597. meta_data->power = mhdr->power;
  2598. meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
  2599. meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
  2600. meta_data->pream_type = mhdr->rate_info[0].preamble_type;
  2601. meta_data->retry_limit = mhdr->rate_info[0].max_tries;
  2602. meta_data->dyn_bw = 1;
  2603. meta_data->valid_pwr = 1;
  2604. meta_data->valid_mcs_mask = 1;
  2605. meta_data->valid_nss_mask = 1;
  2606. meta_data->valid_preamble_type = 1;
  2607. meta_data->valid_retries = 1;
  2608. meta_data->valid_bw_info = 1;
  2609. }
  2610. if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
  2611. meta_data->encrypt_type = 0;
  2612. meta_data->valid_encrypt_type = 1;
  2613. meta_data->learning_frame = 0;
  2614. }
  2615. meta_data->valid_key_flags = 1;
  2616. meta_data->key_flags = (mhdr->keyix & 0x3);
  2617. remove_meta_hdr:
  2618. if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2619. dp_tx_err("qdf_nbuf_pull_head failed");
  2620. qdf_nbuf_free(nbuf);
  2621. return NULL;
  2622. }
  2623. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  2624. dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
  2625. " tid %d to_fw %d",
  2626. msdu_info->meta_data[0],
  2627. msdu_info->meta_data[1],
  2628. msdu_info->meta_data[2],
  2629. msdu_info->meta_data[3],
  2630. msdu_info->meta_data[4],
  2631. msdu_info->meta_data[5],
  2632. msdu_info->tid, msdu_info->exception_fw);
  2633. return nbuf;
  2634. }
  2635. #else
  2636. static
  2637. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2638. struct dp_tx_msdu_info_s *msdu_info)
  2639. {
  2640. return nbuf;
  2641. }
  2642. #endif
  2643. /**
  2644. * dp_check_exc_metadata() - Checks if parameters are valid
  2645. * @tx_exc: holds all exception path parameters
  2646. *
  2647. * Return: true when all the parameters are valid else false
  2648. *
  2649. */
  2650. static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
  2651. {
  2652. bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
  2653. HTT_INVALID_TID);
  2654. bool invalid_encap_type =
  2655. (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
  2656. tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
  2657. bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
  2658. tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
  2659. bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
  2660. tx_exc->ppdu_cookie == 0);
  2661. if (tx_exc->is_intrabss_fwd)
  2662. return true;
  2663. if (invalid_tid || invalid_encap_type || invalid_sec_type ||
  2664. invalid_cookie) {
  2665. return false;
  2666. }
  2667. return true;
  2668. }
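/*
 * Illustrative sketch only (not a caller in this file): an exception
 * path user would typically start from the "invalid" sentinels accepted
 * above and override just the fields it needs, e.g.:
 *
 *	struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *	tx_exc.tid = HTT_INVALID_TID;
 *	tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	tx_exc.sec_type = CDP_INVALID_SEC_TYPE;
 *	tx_exc.peer_id = CDP_INVALID_PEER;
 *	nbuf = dp_tx_send_exception(soc_hdl, vdev_id, nbuf, &tx_exc);
 *
 * Real callers may set additional fields not validated here.
 */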
  2669. #ifdef ATH_SUPPORT_IQUE
  2670. bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2671. {
  2672. qdf_ether_header_t *eh;
  2673. /* Mcast to Ucast Conversion*/
  2674. if (qdf_likely(!vdev->mcast_enhancement_en))
  2675. return true;
  2676. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2677. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
  2678. !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
  2679. dp_verbose_debug("Mcast frm for ME %pK", vdev);
  2680. qdf_nbuf_set_next(nbuf, NULL);
  2681. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
  2682. qdf_nbuf_len(nbuf));
  2683. if (dp_tx_prepare_send_me(vdev, nbuf) ==
  2684. QDF_STATUS_SUCCESS) {
  2685. return false;
  2686. }
  2687. if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
  2688. if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
  2689. QDF_STATUS_SUCCESS) {
  2690. return false;
  2691. }
  2692. }
  2693. }
  2694. return true;
  2695. }
  2696. #else
  2697. bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2698. {
  2699. return true;
  2700. }
  2701. #endif
  2702. #ifdef QCA_SUPPORT_WDS_EXTENDED
  2703. /**
  2704. * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
  2705. * @vdev: vdev handle
  2706. * @nbuf: skb
  2707. *
  2708. * Return: true if frame is dropped, false otherwise
  2709. */
  2710. static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2711. {
	/* Drop tx mcast when both drop_tx_mcast and WDS Extended are set */
  2713. if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
  2714. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  2715. qdf_nbuf_data(nbuf);
  2716. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
  2717. DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1);
  2718. return true;
  2719. }
  2720. }
  2721. return false;
  2722. }
  2723. #else
  2724. static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2725. {
  2726. return false;
  2727. }
  2728. #endif
  2729. /**
  2730. * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
  2731. * @nbuf: qdf_nbuf_t
  2732. * @vdev: struct dp_vdev *
  2733. *
 * Allow a packet to be processed only if it is destined to a peer
 * connected to the same vap. Drop the packet if the client is
 * connected to a different vap.
  2737. *
  2738. * Return: QDF_STATUS
  2739. */
  2740. static inline QDF_STATUS
  2741. dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  2742. {
  2743. struct dp_ast_entry *dst_ast_entry = NULL;
  2744. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2745. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
  2746. DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
  2747. return QDF_STATUS_SUCCESS;
  2748. qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
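	/*
	 * Look up the destination address in the AST table, restricted
	 * to this vdev; a miss means the client is associated with a
	 * different vap and the frame must be dropped.
	 */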
  2749. dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
  2750. eh->ether_dhost,
  2751. vdev->vdev_id);
  2752. /* If there is no ast entry, return failure */
  2753. if (qdf_unlikely(!dst_ast_entry)) {
  2754. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2755. return QDF_STATUS_E_FAILURE;
  2756. }
  2757. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2758. return QDF_STATUS_SUCCESS;
  2759. }
  2760. /**
  2761. * dp_tx_nawds_handler() - NAWDS handler
  2762. *
  2763. * @soc: DP soc handle
  2764. * @vdev: DP vdev handle
  2765. * @msdu_info: msdu_info required to create HTT metadata
  2766. * @nbuf: skb
 * @sa_peer_id: peer id of the source address
 *
 * This API transmits the multicast frame, with the peer id set, to
 * every NAWDS-enabled peer on the vdev.
  2771. *
  2772. * Return: none
  2773. */
  2774. void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
  2775. struct dp_tx_msdu_info_s *msdu_info,
  2776. qdf_nbuf_t nbuf, uint16_t sa_peer_id)
  2777. {
  2778. struct dp_peer *peer = NULL;
  2779. qdf_nbuf_t nbuf_clone = NULL;
  2780. uint16_t peer_id = DP_INVALID_PEER;
  2781. struct dp_txrx_peer *txrx_peer;
  2782. uint8_t link_id = 0;
	/* Skip forwarding for a source that has an AST entry
	 * but does not yet have a valid peer id.
	 */
  2786. if (sa_peer_id == HTT_INVALID_PEER)
  2787. return;
  2788. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2789. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2790. txrx_peer = dp_get_txrx_peer(peer);
  2791. if (!txrx_peer)
  2792. continue;
  2793. if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
  2794. peer_id = peer->peer_id;
  2795. if (!dp_peer_is_primary_link_peer(peer))
  2796. continue;
			/* For a WDS ext peer, mcast traffic is
			 * sent over the VLAN interface instead
			 */
  2800. if (dp_peer_is_wds_ext_peer(txrx_peer))
  2801. continue;
			/* Multicast packets need to be
			 * dropped in case of intra-bss forwarding
			 */
  2805. if (sa_peer_id == txrx_peer->peer_id) {
  2806. dp_tx_debug("multicast packet");
  2807. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  2808. tx.nawds_mcast_drop,
  2809. 1, link_id);
  2810. continue;
  2811. }
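			/*
			 * Each eligible NAWDS peer gets its own clone of
			 * the frame; the original nbuf stays with the caller.
			 */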
  2812. nbuf_clone = qdf_nbuf_clone(nbuf);
  2813. if (!nbuf_clone) {
  2814. QDF_TRACE(QDF_MODULE_ID_DP,
  2815. QDF_TRACE_LEVEL_ERROR,
  2816. FL("nbuf clone failed"));
  2817. break;
  2818. }
  2819. nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
  2820. msdu_info, peer_id,
  2821. NULL);
  2822. if (nbuf_clone) {
  2823. dp_tx_debug("pkt send failed");
  2824. qdf_nbuf_free(nbuf_clone);
  2825. } else {
  2826. if (peer_id != DP_INVALID_PEER)
  2827. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  2828. tx.nawds_mcast,
  2829. 1, qdf_nbuf_len(nbuf), link_id);
  2830. }
  2831. }
  2832. }
  2833. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2834. }
  2835. #ifdef WLAN_MCAST_MLO
  2836. static inline bool
  2837. dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
  2838. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2839. {
  2840. if (!tx_exc_metadata->is_mlo_mcast && qdf_unlikely(vdev->mesh_vdev))
  2841. return true;
  2842. return false;
  2843. }
  2844. #else
  2845. static inline bool
  2846. dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
  2847. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2848. {
  2849. if (qdf_unlikely(vdev->mesh_vdev))
  2850. return true;
  2851. return false;
  2852. }
  2853. #endif
  2854. qdf_nbuf_t
  2855. dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2856. qdf_nbuf_t nbuf,
  2857. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2858. {
  2859. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2860. struct dp_tx_msdu_info_s msdu_info;
  2861. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2862. DP_MOD_ID_TX_EXCEPTION);
  2863. if (qdf_unlikely(!vdev))
  2864. goto fail;
  2865. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2866. if (!tx_exc_metadata)
  2867. goto fail;
  2868. msdu_info.tid = tx_exc_metadata->tid;
  2869. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2870. QDF_MAC_ADDR_REF(nbuf->data));
  2871. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2872. if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
  2873. dp_tx_err("Invalid parameters in exception path");
  2874. goto fail;
  2875. }
  2876. /* for peer based metadata check if peer is valid */
  2877. if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
  2878. struct dp_peer *peer = NULL;
  2879. peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
  2880. tx_exc_metadata->peer_id,
  2881. DP_MOD_ID_TX_EXCEPTION);
  2882. if (qdf_unlikely(!peer)) {
  2883. DP_STATS_INC(vdev,
  2884. tx_i.dropped.invalid_peer_id_in_exc_path,
  2885. 1);
  2886. goto fail;
  2887. }
  2888. dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
  2889. }
  2890. /* Basic sanity checks for unsupported packets */
  2891. /* MESH mode */
  2892. if (dp_tx_check_mesh_vdev(vdev, tx_exc_metadata)) {
  2893. dp_tx_err("Mesh mode is not supported in exception path");
  2894. goto fail;
  2895. }
  2896. /*
  2897. * Classify the frame and call corresponding
  2898. * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
  2900. * into MSDU_INFO structure which is later used to fill
  2901. * SW and HW descriptors.
  2902. */
  2903. if (qdf_nbuf_is_tso(nbuf)) {
  2904. dp_verbose_debug("TSO frame %pK", vdev);
  2905. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2906. qdf_nbuf_len(nbuf));
  2907. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2908. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2909. qdf_nbuf_len(nbuf));
  2910. goto fail;
  2911. }
  2912. DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1);
  2913. goto send_multiple;
  2914. }
  2915. /* SG */
  2916. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2917. struct dp_tx_seg_info_s seg_info = {0};
  2918. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2919. if (!nbuf)
  2920. goto fail;
  2921. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2922. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2923. qdf_nbuf_len(nbuf));
  2924. goto send_multiple;
  2925. }
	if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
  2927. DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
  2928. qdf_nbuf_len(nbuf));
  2929. dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
  2930. tx_exc_metadata->ppdu_cookie);
  2931. }
  2932. /*
  2933. * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
  2935. * dedicated for data and 1 for command.
  2936. * "queue_id" maps to one hardware ring.
  2937. * With each ring, we also associate a unique Tx descriptor pool
  2938. * to minimize lock contention for these resources.
  2939. */
  2940. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2941. /*
	 * If the packet is an mcast packet, send it through the
	 * mlo_mcast handler so it goes out on all partner vdevs
  2944. */
  2945. if (soc->arch_ops.dp_tx_mlo_mcast_send) {
  2946. nbuf = soc->arch_ops.dp_tx_mlo_mcast_send(soc, vdev,
  2947. nbuf,
  2948. tx_exc_metadata);
  2949. if (!nbuf)
  2950. goto fail;
  2951. }
  2952. if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
  2953. if (qdf_unlikely(vdev->nawds_enabled)) {
  2954. /*
  2955. * This is a multicast packet
  2956. */
  2957. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  2958. tx_exc_metadata->peer_id);
  2959. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  2960. 1, qdf_nbuf_len(nbuf));
  2961. }
  2962. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2963. DP_INVALID_PEER, NULL);
  2964. } else {
  2965. /*
  2966. * Check exception descriptors
  2967. */
  2968. if (dp_tx_exception_limit_check(vdev))
  2969. goto fail;
  2970. /* Single linear frame */
  2971. /*
  2972. * If nbuf is a simple linear frame, use send_single function to
  2973. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2974. * SRNG. There is no need to setup a MSDU extension descriptor.
  2975. */
  2976. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2977. tx_exc_metadata->peer_id,
  2978. tx_exc_metadata);
  2979. }
  2980. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2981. return nbuf;
  2982. send_multiple:
  2983. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2984. fail:
  2985. if (vdev)
  2986. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2987. dp_verbose_debug("pkt send failed");
  2988. return nbuf;
  2989. }
  2990. qdf_nbuf_t
  2991. dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2992. uint8_t vdev_id, qdf_nbuf_t nbuf,
  2993. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2994. {
  2995. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2996. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2997. DP_MOD_ID_TX_EXCEPTION);
  2998. if (qdf_unlikely(!vdev))
  2999. goto fail;
  3000. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  3001. == QDF_STATUS_E_FAILURE)) {
  3002. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  3003. goto fail;
  3004. }
	/* Drop the reference here; dp_tx_send_exception takes its own */
  3006. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  3007. return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
  3008. fail:
  3009. if (vdev)
  3010. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  3011. dp_verbose_debug("pkt send failed");
  3012. return nbuf;
  3013. }
  3014. #ifdef MESH_MODE_SUPPORT
  3015. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3016. qdf_nbuf_t nbuf)
  3017. {
  3018. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3019. struct meta_hdr_s *mhdr;
  3020. qdf_nbuf_t nbuf_mesh = NULL;
  3021. qdf_nbuf_t nbuf_clone = NULL;
  3022. struct dp_vdev *vdev;
  3023. uint8_t no_enc_frame = 0;
  3024. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  3025. if (!nbuf_mesh) {
  3026. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3027. "qdf_nbuf_unshare failed");
  3028. return nbuf;
  3029. }
  3030. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  3031. if (!vdev) {
  3032. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3033. "vdev is NULL for vdev_id %d", vdev_id);
  3034. return nbuf;
  3035. }
  3036. nbuf = nbuf_mesh;
  3037. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  3038. if ((vdev->sec_type != cdp_sec_type_none) &&
  3039. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  3040. no_enc_frame = 1;
  3041. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  3042. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  3043. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  3044. !no_enc_frame) {
  3045. nbuf_clone = qdf_nbuf_clone(nbuf);
  3046. if (!nbuf_clone) {
  3047. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3048. "qdf_nbuf_clone failed");
  3049. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  3050. return nbuf;
  3051. }
  3052. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  3053. }
  3054. if (nbuf_clone) {
  3055. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  3056. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  3057. } else {
  3058. qdf_nbuf_free(nbuf_clone);
  3059. }
  3060. }
  3061. if (no_enc_frame)
  3062. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  3063. else
  3064. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  3065. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  3066. if ((!nbuf) && no_enc_frame) {
  3067. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  3068. }
  3069. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  3070. return nbuf;
  3071. }
  3072. #else
  3073. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3074. qdf_nbuf_t nbuf)
  3075. {
  3076. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  3077. }
  3078. #endif
  3079. #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
  3080. static inline
  3081. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  3082. {
  3083. if (nbuf) {
  3084. qdf_prefetch(&nbuf->len);
  3085. qdf_prefetch(&nbuf->data);
  3086. }
  3087. }
  3088. #else
  3089. static inline
  3090. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  3091. {
  3092. }
  3093. #endif
  3094. #ifdef DP_UMAC_HW_RESET_SUPPORT
  3095. qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3096. qdf_nbuf_t nbuf)
  3097. {
  3098. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3099. struct dp_vdev *vdev = NULL;
  3100. vdev = soc->vdev_id_map[vdev_id];
  3101. if (qdf_unlikely(!vdev))
  3102. return nbuf;
  3103. DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
  3104. return nbuf;
  3105. }
  3106. qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3107. qdf_nbuf_t nbuf,
  3108. struct cdp_tx_exception_metadata *tx_exc_metadata)
  3109. {
  3110. return dp_tx_drop(soc_hdl, vdev_id, nbuf);
  3111. }
  3112. #endif
  3113. #ifdef FEATURE_DIRECT_LINK
  3114. /**
  3115. * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet
  3116. * @nbuf: skb
  3117. * @vdev: DP vdev handle
  3118. *
  3119. * Return: None
  3120. */
  3121. static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  3122. {
  3123. if (qdf_unlikely(vdev->to_fw))
  3124. QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
  3125. }
  3126. #else
  3127. static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  3128. {
  3129. }
  3130. #endif
  3131. qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3132. qdf_nbuf_t nbuf)
  3133. {
  3134. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3135. uint16_t peer_id = HTT_INVALID_PEER;
  3136. /*
	 * doing a memzero would add function call overhead, so rely on
	 * static stack-variable initialization instead
  3139. */
  3140. struct dp_tx_msdu_info_s msdu_info = {0};
  3141. struct dp_vdev *vdev = NULL;
  3142. qdf_nbuf_t end_nbuf = NULL;
  3143. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3144. return nbuf;
  3145. /*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in the per-packet path.
	 *
	 * In this path the vdev memory is already protected by the
	 * netdev tx lock
  3151. */
  3152. vdev = soc->vdev_id_map[vdev_id];
  3153. if (qdf_unlikely(!vdev))
  3154. return nbuf;
  3155. dp_vdev_tx_mark_to_fw(nbuf, vdev);
  3156. /*
  3157. * Set Default Host TID value to invalid TID
  3158. * (TID override disabled)
  3159. */
  3160. msdu_info.tid = HTT_TX_EXT_TID_INVALID;
  3161. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  3162. if (qdf_unlikely(vdev->mesh_vdev)) {
  3163. qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
  3164. &msdu_info);
  3165. if (!nbuf_mesh) {
  3166. dp_verbose_debug("Extracting mesh metadata failed");
  3167. return nbuf;
  3168. }
  3169. nbuf = nbuf_mesh;
  3170. }
  3171. /*
  3172. * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
  3174. * dedicated for data and 1 for command.
  3175. * "queue_id" maps to one hardware ring.
  3176. * With each ring, we also associate a unique Tx descriptor pool
  3177. * to minimize lock contention for these resources.
  3178. */
  3179. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  3180. DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
  3181. 1);
  3182. /*
  3183. * TCL H/W supports 2 DSCP-TID mapping tables.
  3184. * Table 1 - Default DSCP-TID mapping table
  3185. * Table 2 - 1 DSCP-TID override table
  3186. *
  3187. * If we need a different DSCP-TID mapping for this vap,
  3188. * call tid_classify to extract DSCP/ToS from frame and
  3189. * map to a TID and store in msdu_info. This is later used
  3190. * to fill in TCL Input descriptor (per-packet TID override).
  3191. */
  3192. dp_tx_classify_tid(vdev, nbuf, &msdu_info);
  3193. /*
  3194. * Classify the frame and call corresponding
  3195. * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
  3197. * into MSDU_INFO structure which is later used to fill
  3198. * SW and HW descriptors.
  3199. */
  3200. if (qdf_nbuf_is_tso(nbuf)) {
  3201. dp_verbose_debug("TSO frame %pK", vdev);
  3202. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  3203. qdf_nbuf_len(nbuf));
  3204. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  3205. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  3206. qdf_nbuf_len(nbuf));
  3207. return nbuf;
  3208. }
  3209. DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1);
  3210. goto send_multiple;
  3211. }
  3212. /* SG */
  3213. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  3214. if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
  3215. if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
  3216. return nbuf;
  3217. } else {
  3218. struct dp_tx_seg_info_s seg_info = {0};
  3219. if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
  3220. goto send_single;
  3221. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
  3222. &msdu_info);
  3223. if (!nbuf)
  3224. return NULL;
  3225. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  3226. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  3227. qdf_nbuf_len(nbuf));
  3228. goto send_multiple;
  3229. }
  3230. }
  3231. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  3232. return NULL;
  3233. if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
  3234. return nbuf;
  3235. /* RAW */
  3236. if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
  3237. struct dp_tx_seg_info_s seg_info = {0};
  3238. nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
  3239. if (!nbuf)
  3240. return NULL;
  3241. dp_verbose_debug("Raw frame %pK", vdev);
  3242. goto send_multiple;
  3243. }
  3244. if (qdf_unlikely(vdev->nawds_enabled)) {
  3245. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  3246. qdf_nbuf_data(nbuf);
  3247. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
  3248. uint16_t sa_peer_id = DP_INVALID_PEER;
  3249. if (!soc->ast_offload_support) {
  3250. struct dp_ast_entry *ast_entry = NULL;
  3251. qdf_spin_lock_bh(&soc->ast_lock);
  3252. ast_entry = dp_peer_ast_hash_find_by_pdevid
  3253. (soc,
  3254. (uint8_t *)(eh->ether_shost),
  3255. vdev->pdev->pdev_id);
  3256. if (ast_entry)
  3257. sa_peer_id = ast_entry->peer_id;
  3258. qdf_spin_unlock_bh(&soc->ast_lock);
  3259. }
  3260. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  3261. sa_peer_id);
  3262. }
  3263. peer_id = DP_INVALID_PEER;
  3264. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  3265. 1, qdf_nbuf_len(nbuf));
  3266. }
  3267. send_single:
  3268. /* Single linear frame */
  3269. /*
  3270. * If nbuf is a simple linear frame, use send_single function to
  3271. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  3272. * SRNG. There is no need to setup a MSDU extension descriptor.
  3273. */
  3274. dp_tx_prefetch_nbuf_data(nbuf);
  3275. nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
  3276. peer_id, end_nbuf);
  3277. return nbuf;
  3278. send_multiple:
  3279. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  3280. if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
  3281. dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
  3282. return nbuf;
  3283. }
  3284. qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
  3285. uint8_t vdev_id, qdf_nbuf_t nbuf)
  3286. {
  3287. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3288. struct dp_vdev *vdev = NULL;
  3289. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3290. return nbuf;
  3291. /*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in the per-packet path.
	 *
	 * In this path the vdev memory is already protected by the
	 * netdev tx lock
  3297. */
  3298. vdev = soc->vdev_id_map[vdev_id];
  3299. if (qdf_unlikely(!vdev))
  3300. return nbuf;
  3301. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  3302. == QDF_STATUS_E_FAILURE)) {
  3303. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  3304. return nbuf;
  3305. }
  3306. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  3307. }
  3308. #ifdef UMAC_SUPPORT_PROXY_ARP
  3309. /**
  3310. * dp_tx_proxy_arp() - Tx proxy arp handler
  3311. * @vdev: datapath vdev handle
  3312. * @nbuf: sk buffer
  3313. *
  3314. * Return: status
  3315. */
  3316. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3317. {
  3318. if (vdev->osif_proxy_arp)
  3319. return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
  3320. /*
	 * When UMAC_SUPPORT_PROXY_ARP is defined, osif_proxy_arp is
	 * expected to have a valid function pointer assigned to it
  3324. */
  3325. dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
  3326. return QDF_STATUS_NOT_INITIALIZED;
  3327. }
  3328. #else
  3329. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3330. {
  3331. return QDF_STATUS_SUCCESS;
  3332. }
  3333. #endif
  3334. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
  3335. !defined(CONFIG_MLO_SINGLE_DEV)
  3336. #ifdef WLAN_MCAST_MLO
  3337. static bool
  3338. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3339. struct dp_tx_desc_s *tx_desc,
  3340. qdf_nbuf_t nbuf,
  3341. uint8_t reinject_reason)
  3342. {
  3343. if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
  3344. if (soc->arch_ops.dp_tx_mcast_handler)
  3345. soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
  3346. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3347. return true;
  3348. }
  3349. return false;
  3350. }
  3351. #else /* WLAN_MCAST_MLO */
  3352. static inline bool
  3353. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3354. struct dp_tx_desc_s *tx_desc,
  3355. qdf_nbuf_t nbuf,
  3356. uint8_t reinject_reason)
  3357. {
  3358. return false;
  3359. }
  3360. #endif /* WLAN_MCAST_MLO */
  3361. #else
  3362. static inline bool
  3363. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3364. struct dp_tx_desc_s *tx_desc,
  3365. qdf_nbuf_t nbuf,
  3366. uint8_t reinject_reason)
  3367. {
  3368. return false;
  3369. }
  3370. #endif
  3371. void dp_tx_reinject_handler(struct dp_soc *soc,
  3372. struct dp_vdev *vdev,
  3373. struct dp_tx_desc_s *tx_desc,
  3374. uint8_t *status,
  3375. uint8_t reinject_reason)
  3376. {
  3377. struct dp_peer *peer = NULL;
  3378. uint32_t peer_id = HTT_INVALID_PEER;
  3379. qdf_nbuf_t nbuf = tx_desc->nbuf;
  3380. qdf_nbuf_t nbuf_copy = NULL;
  3381. struct dp_tx_msdu_info_s msdu_info;
  3382. #ifdef WDS_VENDOR_EXTENSION
  3383. int is_mcast = 0, is_ucast = 0;
  3384. int num_peers_3addr = 0;
  3385. qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
  3386. struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
  3387. #endif
  3388. struct dp_txrx_peer *txrx_peer;
  3389. qdf_assert(vdev);
  3390. dp_tx_debug("Tx reinject path");
  3391. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  3392. qdf_nbuf_len(tx_desc->nbuf));
  3393. if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
  3394. return;
  3395. #ifdef WDS_VENDOR_EXTENSION
  3396. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  3397. is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
  3398. } else {
  3399. is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
  3400. }
  3401. is_ucast = !is_mcast;
  3402. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3403. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3404. txrx_peer = dp_get_txrx_peer(peer);
  3405. if (!txrx_peer || txrx_peer->bss_peer)
  3406. continue;
		/* Detect wds peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send the
		 * mcast frame using the 3-addr format. All wds-enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
  3413. if (!txrx_peer->wds_enabled ||
  3414. !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
  3415. num_peers_3addr = 1;
  3416. break;
  3417. }
  3418. }
  3419. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3420. #endif
  3421. if (qdf_unlikely(vdev->mesh_vdev)) {
  3422. DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
  3423. } else {
  3424. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3425. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3426. txrx_peer = dp_get_txrx_peer(peer);
  3427. if (!txrx_peer)
  3428. continue;
  3429. if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
  3430. #ifdef WDS_VENDOR_EXTENSION
  3431. /*
  3432. * . if 3-addr STA, then send on BSS Peer
  3433. * . if Peer WDS enabled and accept 4-addr mcast,
  3434. * send mcast on that peer only
  3435. * . if Peer WDS enabled and accept 4-addr ucast,
  3436. * send ucast on that peer only
  3437. */
  3438. ((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
  3439. (txrx_peer->wds_enabled &&
  3440. ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
  3441. (is_ucast &&
  3442. txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
  3443. #else
  3444. (txrx_peer->bss_peer &&
  3445. (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
  3446. #endif
  3447. peer_id = DP_INVALID_PEER;
  3448. nbuf_copy = qdf_nbuf_copy(nbuf);
  3449. if (!nbuf_copy) {
  3450. dp_tx_debug("nbuf copy failed");
  3451. break;
  3452. }
  3453. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  3454. dp_tx_get_queue(vdev, nbuf,
  3455. &msdu_info.tx_queue);
  3456. nbuf_copy = dp_tx_send_msdu_single(vdev,
  3457. nbuf_copy,
  3458. &msdu_info,
  3459. peer_id,
  3460. NULL);
  3461. if (nbuf_copy) {
  3462. dp_tx_debug("pkt send failed");
  3463. qdf_nbuf_free(nbuf_copy);
  3464. }
  3465. }
  3466. }
  3467. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3468. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  3469. QDF_DMA_TO_DEVICE, nbuf->len);
  3470. qdf_nbuf_free(nbuf);
  3471. }
  3472. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3473. }
  3474. void dp_tx_inspect_handler(struct dp_soc *soc,
  3475. struct dp_vdev *vdev,
  3476. struct dp_tx_desc_s *tx_desc,
  3477. uint8_t *status)
  3478. {
  3479. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3480. "%s Tx inspect path",
  3481. __func__);
  3482. DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
  3483. qdf_nbuf_len(tx_desc->nbuf));
  3484. DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
  3485. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3486. }
  3487. #ifdef MESH_MODE_SUPPORT
  3488. /**
  3489. * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
  3490. * in mesh meta header
  3491. * @tx_desc: software descriptor head pointer
  3492. * @ts: pointer to tx completion stats
  3493. * Return: none
  3494. */
  3495. static
  3496. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3497. struct hal_tx_completion_status *ts)
  3498. {
  3499. qdf_nbuf_t netbuf = tx_desc->nbuf;
  3500. if (!tx_desc->msdu_ext_desc) {
  3501. if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
  3502. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3503. "netbuf %pK offset %d",
  3504. netbuf, tx_desc->pkt_offset);
  3505. return;
  3506. }
  3507. }
  3508. }
  3509. #else
  3510. static
  3511. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3512. struct hal_tx_completion_status *ts)
  3513. {
  3514. }
  3515. #endif
  3516. #ifdef CONFIG_SAWF
  3517. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3518. struct dp_vdev *vdev,
  3519. struct dp_txrx_peer *txrx_peer,
  3520. struct dp_tx_desc_s *tx_desc,
  3521. struct hal_tx_completion_status *ts,
  3522. uint8_t tid)
  3523. {
  3524. dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
  3525. ts, tid);
  3526. }
  3527. static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3528. uint32_t nw_delay,
  3529. uint32_t sw_delay,
  3530. uint32_t hw_delay)
  3531. {
  3532. dp_peer_tid_delay_avg(tx_delay,
  3533. nw_delay,
  3534. sw_delay,
  3535. hw_delay);
  3536. }
  3537. #else
  3538. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3539. struct dp_vdev *vdev,
  3540. struct dp_txrx_peer *txrx_peer,
  3541. struct dp_tx_desc_s *tx_desc,
  3542. struct hal_tx_completion_status *ts,
  3543. uint8_t tid)
  3544. {
  3545. }
  3546. static inline void
  3547. dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3548. uint32_t nw_delay, uint32_t sw_delay,
  3549. uint32_t hw_delay)
  3550. {
  3551. }
  3552. #endif
  3553. #ifdef QCA_PEER_EXT_STATS
  3554. #ifdef WLAN_CONFIG_TX_DELAY
  3555. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3556. struct dp_tx_desc_s *tx_desc,
  3557. struct hal_tx_completion_status *ts,
  3558. struct dp_vdev *vdev)
  3559. {
  3560. struct dp_soc *soc = vdev->pdev->soc;
  3561. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3562. int64_t timestamp_ingress, timestamp_hw_enqueue;
  3563. uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
  3564. if (!ts->valid)
  3565. return;
  3566. timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
  3567. timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
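	/* Software enqueue delay: host ingress to HW ring enqueue, in us */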
  3568. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3569. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3570. if (soc->arch_ops.dp_tx_compute_hw_delay)
  3571. if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
  3572. &fwhw_transmit_delay))
  3573. dp_hist_update_stats(&tx_delay->hwtx_delay,
  3574. fwhw_transmit_delay);
  3575. dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
  3576. fwhw_transmit_delay);
  3577. }
  3578. #else
  3579. /**
  3580. * dp_tx_compute_tid_delay() - Compute per TID delay
  3581. * @stats: Per TID delay stats
  3582. * @tx_desc: Software Tx descriptor
  3583. * @ts: Tx completion status
  3584. * @vdev: vdev
  3585. *
  3586. * Compute the software enqueue and hw enqueue delays and
  3587. * update the respective histograms
  3588. *
  3589. * Return: void
  3590. */
  3591. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3592. struct dp_tx_desc_s *tx_desc,
  3593. struct hal_tx_completion_status *ts,
  3594. struct dp_vdev *vdev)
  3595. {
  3596. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3597. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3598. uint32_t sw_enqueue_delay, fwhw_transmit_delay;
  3599. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3600. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3601. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3602. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3603. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3604. timestamp_hw_enqueue);
  3605. /*
	 * Update the Tx software enqueue delay and the HW
	 * enqueue-to-completion delay.
  3607. */
  3608. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3609. dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
  3610. }
  3611. #endif
  3612. /**
  3613. * dp_tx_update_peer_delay_stats() - Update the peer delay stats
  3614. * @txrx_peer: DP peer context
  3615. * @tx_desc: Tx software descriptor
  3616. * @ts: Tx completion status
 * @ring_id: Tx completion CPU context ID
 *
 * Update the peer extended stats. These are delay stats
 * enhanced to per-MSDU granularity.
  3621. *
  3622. * Return: void
  3623. */
  3624. static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3625. struct dp_tx_desc_s *tx_desc,
  3626. struct hal_tx_completion_status *ts,
  3627. uint8_t ring_id)
  3628. {
  3629. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3630. struct dp_soc *soc = NULL;
  3631. struct dp_peer_delay_stats *delay_stats = NULL;
  3632. uint8_t tid;
  3633. soc = pdev->soc;
  3634. if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
  3635. return;
  3636. if (!txrx_peer->delay_stats)
  3637. return;
  3638. tid = ts->tid;
  3639. delay_stats = txrx_peer->delay_stats;
	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3641. /*
	 * For non-data TIDs, use the last data TID (CDP_MAX_DATA_TIDS - 1)
  3643. */
  3644. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3645. tid = CDP_MAX_DATA_TIDS - 1;
  3646. dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
  3647. tx_desc, ts, txrx_peer->vdev);
  3648. }
  3649. #else
  3650. static inline
  3651. void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3652. struct dp_tx_desc_s *tx_desc,
  3653. struct hal_tx_completion_status *ts,
  3654. uint8_t ring_id)
  3655. {
  3656. }
  3657. #endif
  3658. #ifdef WLAN_PEER_JITTER
  3659. /**
  3660. * dp_tx_jitter_get_avg_jitter() - compute the average jitter
  3661. * @curr_delay: Current delay
  3662. * @prev_delay: Previous delay
  3663. * @avg_jitter: Average Jitter
  3664. * Return: Newly Computed Average Jitter
  3665. */
  3666. static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
  3667. uint32_t prev_delay,
  3668. uint32_t avg_jitter)
  3669. {
  3670. uint32_t curr_jitter;
  3671. int32_t jitter_diff;
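	/*
	 * Instantaneous jitter is the absolute change in delay between
	 * consecutive frames; it is blended into the running average below.
	 */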
  3672. curr_jitter = qdf_abs(curr_delay - prev_delay);
  3673. if (!avg_jitter)
  3674. return curr_jitter;
  3675. jitter_diff = curr_jitter - avg_jitter;
  3676. if (jitter_diff < 0)
  3677. avg_jitter = avg_jitter -
  3678. (qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
  3679. else
  3680. avg_jitter = avg_jitter +
  3681. (qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
  3682. return avg_jitter;
  3683. }
  3684. /**
  3685. * dp_tx_jitter_get_avg_delay() - compute the average delay
  3686. * @curr_delay: Current delay
  3687. * @avg_delay: Average delay
  3688. * Return: Newly Computed Average Delay
  3689. */
  3690. static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
  3691. uint32_t avg_delay)
  3692. {
  3693. int32_t delay_diff;
  3694. if (!avg_delay)
  3695. return curr_delay;
  3696. delay_diff = curr_delay - avg_delay;
  3697. if (delay_diff < 0)
  3698. avg_delay = avg_delay - (qdf_abs(delay_diff) >>
  3699. DP_AVG_DELAY_WEIGHT_DENOM);
  3700. else
  3701. avg_delay = avg_delay + (qdf_abs(delay_diff) >>
  3702. DP_AVG_DELAY_WEIGHT_DENOM);
  3703. return avg_delay;
  3704. }
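/*
 * Both helpers above are exponentially weighted moving averages: each
 * sample moves the average toward the current value by a fraction
 * 1/2^DP_AVG_DELAY_WEIGHT_DENOM (or DP_AVG_JITTER_WEIGHT_DENOM),
 * conceptually
 *
 *	avg += (curr - avg) >> WEIGHT_DENOM;
 *
 * written with an explicit sign branch and qdf_abs() so the shift
 * always operates on a non-negative magnitude.
 */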
  3705. #ifdef WLAN_CONFIG_TX_DELAY
  3706. /**
  3707. * dp_tx_compute_cur_delay() - get the current delay
  3708. * @soc: soc handle
  3709. * @vdev: vdev structure for data path state
  3710. * @ts: Tx completion status
  3711. * @curr_delay: current delay
  3712. * @tx_desc: tx descriptor
  3713. * Return: void
  3714. */
  3715. static
  3716. QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
  3717. struct dp_vdev *vdev,
  3718. struct hal_tx_completion_status *ts,
  3719. uint32_t *curr_delay,
  3720. struct dp_tx_desc_s *tx_desc)
  3721. {
  3722. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  3723. if (soc->arch_ops.dp_tx_compute_hw_delay)
  3724. status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
  3725. curr_delay);
  3726. return status;
  3727. }
  3728. #else
  3729. static
  3730. QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
  3731. struct dp_vdev *vdev,
  3732. struct hal_tx_completion_status *ts,
  3733. uint32_t *curr_delay,
  3734. struct dp_tx_desc_s *tx_desc)
  3735. {
  3736. int64_t current_timestamp, timestamp_hw_enqueue;
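	/*
	 * No target-supplied completion timestamp in this configuration:
	 * approximate the delay as host time elapsed since HW enqueue.
	 */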
  3737. current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
  3738. timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
  3739. *curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
  3740. return QDF_STATUS_SUCCESS;
  3741. }
  3742. #endif
  3743. /**
  3744. * dp_tx_compute_tid_jitter() - compute per tid per ring jitter
  3745. * @jitter: per tid per ring jitter stats
  3746. * @ts: Tx completion status
  3747. * @vdev: vdev structure for data path state
  3748. * @tx_desc: tx descriptor
  3749. * Return: void
  3750. */
  3751. static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
  3752. struct hal_tx_completion_status *ts,
  3753. struct dp_vdev *vdev,
  3754. struct dp_tx_desc_s *tx_desc)
  3755. {
  3756. uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
  3757. struct dp_soc *soc = vdev->pdev->soc;
  3758. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  3759. if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
  3760. jitter->tx_drop += 1;
  3761. return;
  3762. }
  3763. status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
  3764. tx_desc);
  3765. if (QDF_IS_STATUS_SUCCESS(status)) {
  3766. avg_delay = jitter->tx_avg_delay;
  3767. avg_jitter = jitter->tx_avg_jitter;
  3768. prev_delay = jitter->tx_prev_delay;
  3769. avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
  3770. prev_delay,
  3771. avg_jitter);
  3772. avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
  3773. jitter->tx_avg_delay = avg_delay;
  3774. jitter->tx_avg_jitter = avg_jitter;
  3775. jitter->tx_prev_delay = curr_delay;
  3776. jitter->tx_total_success += 1;
  3777. } else if (status == QDF_STATUS_E_FAILURE) {
  3778. jitter->tx_avg_err += 1;
  3779. }
  3780. }
/**
 * dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
 * @txrx_peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @ts: Tx completion status
 * @ring_id: Tx completion CPU context ID
  3786. * Return: void
  3787. */
  3788. static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
  3789. struct dp_tx_desc_s *tx_desc,
  3790. struct hal_tx_completion_status *ts,
  3791. uint8_t ring_id)
  3792. {
  3793. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3794. struct dp_soc *soc = pdev->soc;
  3795. struct cdp_peer_tid_stats *jitter_stats = NULL;
  3796. uint8_t tid;
	struct cdp_peer_tid_stats *tid_stats = NULL;
  3798. if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
  3799. return;
  3800. tid = ts->tid;
  3801. jitter_stats = txrx_peer->jitter_stats;
  3802. qdf_assert_always(jitter_stats);
	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3804. /*
	 * For non-data TIDs, use the last data TID (CDP_MAX_DATA_TIDS - 1)
  3806. */
  3807. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3808. tid = CDP_MAX_DATA_TIDS - 1;
	tid_stats = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
	dp_tx_compute_tid_jitter(tid_stats,
				 ts, txrx_peer->vdev, tx_desc);
  3812. }
  3813. #else
  3814. static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
  3815. struct dp_tx_desc_s *tx_desc,
  3816. struct hal_tx_completion_status *ts,
  3817. uint8_t ring_id)
  3818. {
  3819. }
  3820. #endif
  3821. #ifdef HW_TX_DELAY_STATS_ENABLE
  3822. /**
  3823. * dp_update_tx_delay_stats() - update the delay stats
  3824. * @vdev: vdev handle
  3825. * @delay: delay in ms or us based on the flag delay_in_us
  3826. * @tid: tid value
  3827. * @mode: type of tx delay mode
  3828. * @ring_id: ring number
  3829. * @delay_in_us: flag to indicate whether the delay is in ms or us
  3830. *
  3831. * Return: none
  3832. */
  3833. static inline
  3834. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3835. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3836. {
  3837. struct cdp_tid_tx_stats *tstats =
  3838. &vdev->stats.tid_tx_stats[ring_id][tid];
  3839. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3840. delay_in_us);
  3841. }
  3842. #else
  3843. static inline
  3844. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3845. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3846. {
  3847. struct cdp_tid_tx_stats *tstats =
  3848. &vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3849. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3850. delay_in_us);
  3851. }
  3852. #endif
  3853. void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
  3854. uint8_t tid, uint8_t ring_id)
  3855. {
  3856. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3857. uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
  3858. uint32_t fwhw_transmit_delay_us;
  3859. if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
  3860. qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
  3861. return;
  3862. if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
  3863. fwhw_transmit_delay_us =
  3864. qdf_ktime_to_us(qdf_ktime_real_get()) -
  3865. qdf_ktime_to_us(tx_desc->timestamp);
  3866. /*
  3867. * Delay between packet enqueued to HW and Tx completion in us
  3868. */
  3869. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
  3870. CDP_DELAY_STATS_FW_HW_TRANSMIT,
  3871. ring_id, true);
  3872. /*
  3873. * For MCL, only enqueue to completion delay is required
  3874. * so return if the vdev flag is enabled.
  3875. */
  3876. return;
  3877. }
  3878. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3879. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3880. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3881. timestamp_hw_enqueue);
  3882. if (!timestamp_hw_enqueue)
  3883. return;
  3884. /*
  3885. * Delay between packet enqueued to HW and Tx completion in ms
  3886. */
  3887. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
  3888. CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
  3889. false);
  3890. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3891. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3892. interframe_delay = (uint32_t)(timestamp_ingress -
  3893. vdev->prev_tx_enq_tstamp);
  3894. /*
  3895. * Delay in software enqueue
  3896. */
  3897. dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
  3898. CDP_DELAY_STATS_SW_ENQ, ring_id,
  3899. false);
  3900. /*
	 * Update interframe delay stats calculated at hardstart receive
	 * point. vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so
	 * the interframe delay will not be calculated correctly for it.
	 * On the other hand, this avoids an extra per-packet check of
	 * !vdev->prev_tx_enq_tstamp.
  3906. */
  3907. dp_update_tx_delay_stats(vdev, interframe_delay, tid,
  3908. CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
  3909. false);
  3910. vdev->prev_tx_enq_tstamp = timestamp_ingress;
  3911. }
  3912. #ifdef DISABLE_DP_STATS
  3913. static
  3914. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
  3915. struct dp_txrx_peer *txrx_peer,
  3916. uint8_t link_id)
  3917. {
  3918. }
  3919. #else
  3920. static inline void
  3921. dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
  3922. uint8_t link_id)
  3923. {
  3924. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  3925. DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
  3926. if (subtype != QDF_PROTO_INVALID)
  3927. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
  3928. 1, link_id);
  3929. }
  3930. #endif
  3931. #ifndef QCA_ENHANCED_STATS_SUPPORT
  3932. #ifdef DP_PEER_EXTENDED_API
  3933. static inline uint8_t
  3934. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  3935. {
  3936. return txrx_peer->mpdu_retry_threshold;
  3937. }
  3938. #else
  3939. static inline uint8_t
  3940. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  3941. {
  3942. return 0;
  3943. }
  3944. #endif
  3945. /**
  3946. * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
  3947. *
 * @ts: Tx completion status
  3949. * @txrx_peer: datapath txrx_peer handle
  3950. * @link_id: Link id
  3951. *
  3952. * Return: void
  3953. */
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
	uint8_t mcs, pkt_type, dst_mcs_idx;
	uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;
	/* do HW to SW pkt type conversion */
	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
		    hal_2_dp_pkt_type_map[pkt_type]);

	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
		DP_PEER_EXTD_STATS_INC(txrx_peer,
				       tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
				       1, link_id);

	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
	DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
			       link_id);
	DP_PEER_EXTD_STATS_INC(txrx_peer,
			       tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1,
			       link_id);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
				link_id);
	if (ts->first_msdu) {
		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
					ts->transmit_cnt > 1, link_id);

		if (!retry_threshold)
			return;

		DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
					qdf_do_div(ts->transmit_cnt,
						   retry_threshold),
					ts->transmit_cnt > retry_threshold,
					link_id);
	}
}
#else
static inline void
dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
}
#endif
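
/**
 * dp_tx_get_link_id_from_ppdu_id() - Get the MLO link id encoded in ppdu_id
 * @soc: core txrx main context
 * @ts: Tx completion status
 * @txrx_peer: datapath txrx_peer handle
 * @vdev: datapath vdev handle
 *
 * Return: 1-based hw link id extracted from the ppdu_id for MLD peers when
 * link peer stats are enabled, 0 otherwise (or when the extracted id is out
 * of range)
 */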
#if defined(WLAN_FEATURE_11BE_MLO) && defined(QCA_ENHANCED_STATS_SUPPORT)
static inline uint8_t
dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
			       struct hal_tx_completion_status *ts,
			       struct dp_txrx_peer *txrx_peer,
			       struct dp_vdev *vdev)
{
	uint8_t hw_link_id = 0;
	uint32_t ppdu_id;
	uint8_t link_id_offset, link_id_bits;

	if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
		return 0;

	link_id_offset = soc->link_id_offset;
	link_id_bits = soc->link_id_bits;
	ppdu_id = ts->ppdu_id;
	hw_link_id = ((DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, link_id_offset,
						     link_id_bits)) + 1);
	if (hw_link_id > DP_MAX_MLO_LINKS) {
		hw_link_id = 0;
		DP_PEER_PER_PKT_STATS_INC(
				txrx_peer,
				tx.inval_link_id_pkt_cnt, 1, hw_link_id);
	}

	return hw_link_id;
}
#else
static inline uint8_t
dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
			       struct hal_tx_completion_status *ts,
			       struct dp_txrx_peer *txrx_peer,
			       struct dp_vdev *vdev)
{
	return 0;
}
#endif

/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *			       per wbm ring
 *
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @txrx_peer: peer handle
 * @ring_id: ring number
 * @link_id: Link id
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_txrx_peer *txrx_peer, uint8_t ring_id,
			uint8_t link_id)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
					  link_id);
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);

	if (ts->status < CDP_MAX_TX_TQM_STATUS)
		tid_stats->tqm_status_cnt[ts->status]++;

	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
					   ts->transmit_cnt > 1, link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
					   1, ts->transmit_cnt > 2, link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
					   link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
					   ts->msdu_part_of_amsdu, link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
					   !ts->msdu_part_of_amsdu, link_id);
		txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
							qdf_system_ticks();

		dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);

		return;
	}
	/*
	 * tx_failed is ideally supposed to be updated from HTT ppdu
	 * completion stats. But on IPQ807X/IPQ6018 chipsets, owing to
	 * a hw limitation, there are no completions for failed cases.
	 * Hence tx_failed is updated from the data path here. Note that
	 * if tx_failed is later derived from ppdu completions, this
	 * update has to be removed.
	 */
	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);

	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
				   ts->transmit_cnt > DP_RETRY_COUNT,
				   link_id);
	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);

	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
					      length, link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.fw_rem_queue_disable, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.fw_rem_no_match, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.drop_threshold, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.drop_link_desc_na, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.invalid_drop, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.mcast_vdev_drop, 1,
					  link_id);
	} else {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
					  link_id);
	}
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
			  struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
			    struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: tx status
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc,
					   qdf_nbuf_t netbuf,
					   uint8_t status)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;
	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);

	qdf_assert(tx_desc);

	if (!vdev || !vdev->osif_vdev)
		return;

	osif_dev = vdev->osif_vdev;
	tx_compl_cbk = vdev->tx_comp;

	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		flag |= BIT(QDF_TX_RX_STATUS_OK);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev, flag);
}

/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @txrx_peer: DP peer context
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 * @link_id: link id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_txrx_peer *txrx_peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id,
					       uint8_t link_id)
{
	uint64_t delta_ms;
	struct cdp_tx_sojourn_stats *sojourn_stats;
	struct dp_peer *primary_link_peer = NULL;
	struct dp_soc *link_peer_soc = NULL;

	if (qdf_unlikely(!pdev->enhanced_stats_en))
		return;

	if (qdf_unlikely(tid == HTT_INVALID_TID ||
			 tid >= CDP_DATA_TID_MAX))
		return;

	if (qdf_unlikely(!pdev->sojourn_buf))
		return;

	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_TX_COMP);

	if (qdf_unlikely(!primary_link_peer))
		return;

	sojourn_stats = (struct cdp_tx_sojourn_stats *)
		qdf_nbuf_data(pdev->sojourn_buf);

	link_peer_soc = primary_link_peer->vdev->pdev->soc;
	sojourn_stats->cookie = (void *)
			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
							  primary_link_peer);

	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
		   txdesc_ts;
	qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].per_pkt_stats.tx.avg_sojourn_msdu[tid],
			    delta_ms);
	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
	sojourn_stats->num_msdus[tid] = 1;
	sojourn_stats->avg_sojourn_msdu[tid].internal =
		txrx_peer->stats[link_id].
			per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
			     pdev->sojourn_buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	sojourn_stats->sum_sojourn_msdu[tid] = 0;
	sojourn_stats->num_msdus[tid] = 0;
	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;

	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_txrx_peer *txrx_peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id,
					       uint8_t link_id)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
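/**
 * dp_send_completion_to_pkt_capture() - Relay a Tx completion to the packet
 *					 capture WDI consumer
 * @soc: core txrx main context
 * @desc: software Tx descriptor
 * @ts: Tx completion status
 *
 * Return: none
 */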
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc,
				       struct hal_tx_completion_status *ts)
{
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
			     desc, ts->peer_id,
			     WDI_NO_VAL, desc->pdev->pdev_id);
}
#endif
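
/**
 * dp_tx_comp_process_desc() - Process a completed Tx descriptor
 * @soc: core txrx main context
 * @desc: software Tx descriptor
 * @ts: Tx completion status
 * @txrx_peer: datapath txrx_peer handle
 *
 * Hands the completion to the packet capture, monitor and stack consumers as
 * applicable, and frees the buffer if nobody takes ownership of it.
 *
 * Return: none
 */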
void
dp_tx_comp_process_desc(struct dp_soc *soc,
			struct dp_tx_desc_s *desc,
			struct hal_tx_completion_status *ts,
			struct dp_txrx_peer *txrx_peer)
{
	uint64_t time_latency = 0;
	uint16_t peer_id = DP_INVALID_PEER_ID;

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
				qdf_ktime_to_ms(desc->timestamp));
	}

	dp_send_completion_to_pkt_capture(soc, desc, ts);

	if (dp_tx_pkt_tracepoints_enabled())
		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
				    desc->msdu_ext_desc ?
				    desc->msdu_ext_desc->tso_desc : NULL,
				    qdf_ktime_to_ms(desc->timestamp));

	if (!(desc->msdu_ext_desc)) {
		dp_tx_enh_unmap(soc, desc);

		if (txrx_peer)
			peer_id = txrx_peer->peer_id;

		if (QDF_STATUS_SUCCESS ==
		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
			return;
		}

		if (QDF_STATUS_SUCCESS ==
		    dp_get_completion_indication_for_stack(soc,
							   desc->pdev,
							   txrx_peer, ts,
							   desc->nbuf,
							   time_latency)) {
			dp_send_completion_to_stack(soc,
						    desc->pdev,
						    ts->peer_id,
						    ts->ppdu_id,
						    desc->nbuf);
			return;
		}
	}

	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
	dp_tx_comp_free_buf(soc, desc, false);
}

#ifdef DISABLE_DP_STATS
/**
 * dp_tx_update_connectivity_stats() - update tx connectivity stats
 * @soc: core txrx main context
 * @vdev: virtual device instance
 * @tx_desc: tx desc
 * @status: tx status
 *
 * Return: none
 */
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
}
#else
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
	void *osif_dev;
	ol_txrx_stats_rx_fp stats_cbk;
	uint8_t pkt_type;

	qdf_assert(tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev ||
	    !vdev->stats_cb)
		return;

	osif_dev = vdev->osif_vdev;
	stats_cbk = vdev->stats_cb;

	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
			  &pkt_type);
}
#endif

#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
/* Mask for bit29 ~ bit31 */
#define DP_TX_TS_BIT29_31_MASK 0xE0000000
/* Timestamp value (unit us) if bit29 is set */
#define DP_TX_TS_BIT29_SET_VALUE BIT(29)

/**
 * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
 * @ack_ts: OTA ack timestamp, unit us.
 * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
 * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
 *
 * This function restores the bit29 ~ bit31 value (3 bits) of the
 * buffer_timestamp in the wbm2sw ring entry. buffer_timestamp can only
 * hold up to 0x7FFFF * 1024 us (29 bits), so if the timestamp exceeds
 * 0x7FFFF * 1024 us, bit29 ~ bit31 are lost.
 *
 * Return: the adjusted buffer_timestamp value
 */
static inline
uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
					uint32_t enqueue_ts,
					uint32_t base_delta_ts)
{
	uint32_t ack_buffer_ts;
	uint32_t ack_buffer_ts_bit29_31;
	uint32_t adjusted_enqueue_ts;

	/* corresponding buffer_timestamp value when receive OTA Ack */
	ack_buffer_ts = ack_ts - base_delta_ts;
	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;

	/* restore the bit29 ~ bit31 value */
	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;
	/*
	 * If the actual enqueue_ts value occupies only 29 bits while
	 * enqueue_ts + the real UL delay overflows 29 bits, then bit-29
	 * (the 30th bit) must not be set, otherwise an extra 0x20000000 us
	 * is added to enqueue_ts.
	 */
	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;

	return adjusted_enqueue_ts;
}
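
/**
 * dp_tx_compute_hw_delay_us() - Compute the hardware Tx delay in microseconds
 * @ts: Tx completion status
 * @delta_tsf: delta between the buffer_timestamp clock base and the TSF, in us
 * @delay_us: pointer to return the computed delay
 *
 * The delay is derived as ts->tsf - buffer_timestamp - delta_tsf, i.e. the
 * time from TCL enqueue to OTA ack.
 *
 * Return: QDF_STATUS_SUCCESS when a sane delay could be computed,
 * QDF_STATUS_E_INVAL / QDF_STATUS_E_FAILURE otherwise
 */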
QDF_STATUS
dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
			  uint32_t delta_tsf,
			  uint32_t *delay_us)
{
	uint32_t buffer_ts;
	uint32_t delay;

	if (!delay_us)
		return QDF_STATUS_E_INVAL;
	/* If tx_rate_stats_info_valid is 0, the tsf is invalid; bail out */
	if (!ts->valid)
		return QDF_STATUS_E_INVAL;
	/* buffer_timestamp is in units of 1024 us and is [31:13] of
	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
	 * valid up to 29 bits.
	 */
	buffer_ts = ts->buffer_timestamp << 10;
	buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
						   buffer_ts, delta_tsf);

	delay = ts->tsf - buffer_ts - delta_tsf;

	if (qdf_unlikely(delay & 0x80000000)) {
		dp_err_rl("delay = 0x%x (-ve)\n"
			  "release_src = %d\n"
			  "ppdu_id = 0x%x\n"
			  "peer_id = 0x%x\n"
			  "tid = 0x%x\n"
			  "release_reason = %d\n"
			  "tsf = %u (0x%x)\n"
			  "buffer_timestamp = %u (0x%x)\n"
			  "delta_tsf = %u (0x%x)\n",
			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
			  ts->tid, ts->status, ts->tsf, ts->tsf,
			  ts->buffer_timestamp, ts->buffer_timestamp,
			  delta_tsf, delta_tsf);

		delay = 0;
		goto end;
	}

	delay &= 0x1FFFFFFF; /* mask 29 BITS */

	if (delay > 0x1000000) {
		dp_info_rl("----------------------\n"
			   "Tx completion status:\n"
			   "----------------------\n"
			   "release_src = %d\n"
			   "ppdu_id = 0x%x\n"
			   "release_reason = %d\n"
			   "tsf = %u (0x%x)\n"
			   "buffer_timestamp = %u (0x%x)\n"
			   "delta_tsf = %u (0x%x)\n",
			   ts->release_src, ts->ppdu_id, ts->status,
			   ts->tsf, ts->tsf, ts->buffer_timestamp,
			   ts->buffer_timestamp, delta_tsf, delta_tsf);
		return QDF_STATUS_E_FAILURE;
	}

end:
	*delay_us = delay;

	return QDF_STATUS_SUCCESS;
}

void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint32_t delta_tsf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_err_rl("vdev %d does not exist", vdev_id);
		return;
	}

	vdev->delta_tsf = delta_tsf;
	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
#endif

#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_err_rl("vdev %d does not exist", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_atomic_set(&vdev->ul_delay_report, enable);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       uint32_t *val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	uint32_t delay_accum;
	uint32_t pkts_accum;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		dp_err_rl("vdev %d does not exist", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	/* Average uplink delay based on current accumulated values */
	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);

	*val = delay_accum / pkts_accum;
	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
		 delay_accum, pkts_accum);

	/* Reset accumulated values to 0 */
	qdf_atomic_set(&vdev->ul_delay_accum, 0);
	qdf_atomic_set(&vdev->ul_pkts_accum, 0);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
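
/**
 * dp_tx_update_uplink_delay() - Accumulate uplink delay for a vdev
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @ts: Tx completion status
 *
 * Adds the per-packet hardware Tx delay (in ms) to the vdev accumulators
 * that dp_get_uplink_delay() averages and resets.
 *
 * Return: none
 */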
static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts)
{
	uint32_t ul_delay;

	if (qdf_unlikely(!vdev)) {
		dp_info_rl("vdev is null or deletion in progress");
		return;
	}

	if (!qdf_atomic_read(&vdev->ul_delay_report))
		return;

	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
							  vdev->delta_tsf,
							  &ul_delay)))
		return;

	ul_delay /= 1000; /* in unit of ms */

	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
	qdf_atomic_inc(&vdev->ul_pkts_accum);
}
#else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
static inline
void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
			       struct hal_tx_completion_status *ts)
{
}
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */

void dp_tx_comp_process_tx_status(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_txrx_peer *txrx_peer,
				  uint8_t ring_id)
{
	uint32_t length;
	qdf_ether_header_t *eh;
	struct dp_vdev *vdev = NULL;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	enum qdf_dp_tx_rx_status dp_status;
	uint8_t link_id = 0;

	if (!nbuf) {
		dp_info_rl("invalid tx descriptor. nbuf NULL");
		goto out;
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	length = dp_tx_get_pkt_len(tx_desc);

	dp_status = dp_tx_hw_to_qdf(ts->status);
	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(nbuf),
				 sizeof(qdf_nbuf_data(nbuf)),
				 tx_desc->id, ts->status, dp_status));

	dp_tx_comp_debug("-------------------- \n"
			 "Tx Completion Stats: \n"
			 "-------------------- \n"
			 "ack_frame_rssi = %d \n"
			 "first_msdu = %d \n"
			 "last_msdu = %d \n"
			 "msdu_part_of_amsdu = %d \n"
			 "rate_stats valid = %d \n"
			 "bw = %d \n"
			 "pkt_type = %d \n"
			 "stbc = %d \n"
			 "ldpc = %d \n"
			 "sgi = %d \n"
			 "mcs = %d \n"
			 "ofdma = %d \n"
			 "tones_in_ru = %d \n"
			 "tsf = %d \n"
			 "ppdu_id = %d \n"
			 "transmit_cnt = %d \n"
			 "tid = %d \n"
			 "peer_id = %d\n"
			 "tx_status = %d\n"
			 "tx_release_source = %d\n",
			 ts->ack_frame_rssi, ts->first_msdu,
			 ts->last_msdu, ts->msdu_part_of_amsdu,
			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
			 ts->transmit_cnt, ts->tid, ts->peer_id,
			 ts->status, ts->release_src);

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	if (!txrx_peer) {
		dp_info_rl("peer is null or deletion in progress");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}
	vdev = txrx_peer->vdev;

	link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);

	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
	dp_tx_update_uplink_delay(soc, vdev, ts);

	/* check tx complete notification */
	if (qdf_nbuf_tx_notify_comp_get(nbuf))
		dp_tx_notify_completion(soc, vdev, tx_desc,
					nbuf, ts->status);

	/* Update per-packet stats for mesh mode */
	if (qdf_unlikely(vdev->mesh_vdev) &&
	    !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

	/* Update peer level stats */
	if (qdf_unlikely(txrx_peer->bss_peer &&
			 vdev->opmode == wlan_op_mode_ap)) {
		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
						      length, link_id);

			if (txrx_peer->vdev->tx_encap_type ==
			    htt_cmn_pkt_type_ethernet &&
			    QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      tx.bcast, 1,
							      length, link_id);
			}
		}
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
					      link_id);
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
						      1, length, link_id);
			if (qdf_unlikely(txrx_peer->in_twt)) {
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      tx.tx_success_twt,
							      1, length,
							      link_id);
			}
		}
	}

	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
				     ts, ts->tid);
	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);

#ifdef QCA_SUPPORT_RDK_STATS
	if (soc->peerstats_enabled)
		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
					    qdf_ktime_to_ms(tx_desc->timestamp),
					    ts->ppdu_id, link_id);
#endif

out:
	return;
}

#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
	defined(QCA_ENHANCED_STATS_SUPPORT)
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update)
{
	if (update || (!txrx_peer->hw_txrx_stats_en)) {
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
	}
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update)
{
	if (!txrx_peer->hw_txrx_stats_en) {
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
	}
}
#else
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update)
{
	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
}
#endif

/**
 * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
 * @next: descriptor of the next buffer
 *
 * Return: none
 */
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
static inline
void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
{
	qdf_nbuf_t nbuf = NULL;

	if (next)
		nbuf = next->nbuf;

	if (nbuf)
		qdf_prefetch(nbuf);
}
#else
static inline
void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
{
}
#endif

/**
 * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
 * @soc: core txrx main context
 * @desc: software descriptor
 *
 * Return: true when packet is reinjected
 */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
static inline bool
dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = NULL;

	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
		if (!soc->arch_ops.dp_tx_mcast_handler ||
		    !soc->arch_ops.dp_tx_is_mcast_primary)
			return false;

		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
					     DP_MOD_ID_REINJECT);

		if (qdf_unlikely(!vdev)) {
			dp_tx_comp_info_rl("Unable to get vdev ref %d",
					   desc->id);
			return false;
		}

		if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
			return false;
		}
		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
				 qdf_nbuf_len(desc->nbuf));
		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
		dp_tx_desc_release(desc, desc->pool_id);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
		return true;
	}

	return false;
}
#else
static inline bool
dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	return false;
}
#endif
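
/*
 * When QCA_DP_TX_NBUF_LIST_FREE is enabled, completed nbufs that came from
 * the nbuf recycler are collected on a local queue head and released in one
 * batch via qdf_nbuf_dev_kfree_list(); all other nbufs are freed one at a
 * time with qdf_nbuf_free().
 */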
#ifdef QCA_DP_TX_NBUF_LIST_FREE
static inline void
dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	qdf_nbuf_queue_head_init(nbuf_queue_head);
}

static inline void
dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
			  struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t nbuf = NULL;

	nbuf = desc->nbuf;
	if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
	else
		qdf_nbuf_free(nbuf);
}

static inline void
dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
				  qdf_nbuf_t nbuf)
{
	if (!nbuf)
		return;

	if (nbuf->is_from_recycler)
		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
	else
		qdf_nbuf_free(nbuf);
}

static inline void
dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	qdf_nbuf_dev_kfree_list(nbuf_queue_head);
}
#else
static inline void
dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
}

static inline void
dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
			  struct dp_tx_desc_s *desc)
{
	qdf_nbuf_free(desc->nbuf);
}

static inline void
dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
				  qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}

static inline void
dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
}
#endif
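
/**
 * dp_tx_update_ppeds_tx_comp_stats() - Update Tx completion stats for a
 *					PPE-DS descriptor
 * @soc: core txrx main context
 * @txrx_peer: datapath txrx_peer handle
 * @ts: Tx completion status
 * @desc: software Tx descriptor
 * @ring_id: ring number
 *
 * Return: none
 */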
#ifdef WLAN_SUPPORT_PPEDS
static inline void
dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 struct hal_tx_completion_status *ts,
				 struct dp_tx_desc_s *desc,
				 uint8_t ring_id)
{
	uint8_t link_id = 0;
	struct dp_vdev *vdev = NULL;

	if (qdf_likely(txrx_peer)) {
		if (!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			hal_tx_comp_get_status(&desc->comp,
					       ts,
					       soc->hal_soc);
			vdev = txrx_peer->vdev;
			link_id = dp_tx_get_link_id_from_ppdu_id(soc,
								 ts,
								 txrx_peer,
								 vdev);
			if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
				link_id = 0;
			dp_tx_update_peer_stats(desc, ts,
						txrx_peer,
						ring_id,
						link_id);
		} else {
			dp_tx_update_peer_basic_stats(txrx_peer, desc->length,
						      desc->tx_status, false);
		}
	}
}
#else
static inline void
dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 struct hal_tx_completion_status *ts,
				 struct dp_tx_desc_s *desc,
				 uint8_t ring_id)
{
}
#endif

void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts;
	struct dp_txrx_peer *txrx_peer = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	qdf_nbuf_queue_head_t h;

	desc = comp_head;

	dp_tx_nbuf_queue_head_init(&h);

	while (desc) {
		next = desc->next;
		dp_tx_prefetch_next_nbuf_data(next);

		if (peer_id != desc->peer_id) {
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_TX_COMP);
			peer_id = desc->peer_id;
			txrx_peer =
				dp_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_TX_COMP);
		}

		if (dp_tx_mcast_reinject_handler(soc, desc)) {
			desc = next;
			continue;
		}

		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
			qdf_nbuf_t nbuf;

			dp_tx_update_ppeds_tx_comp_stats(soc, txrx_peer, &ts,
							 desc, ring_id);

			if (desc->pool_id != DP_TX_PPEDS_POOL_ID) {
				nbuf = desc->nbuf;
				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
				dp_tx_desc_free(soc, desc, desc->pool_id);

				__dp_tx_outstanding_dec(soc);
			} else {
				nbuf = dp_ppeds_tx_desc_free(soc, desc);
				dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
			}
			desc = next;
			continue;
		}

		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			struct dp_pdev *pdev = desc->pdev;

			if (qdf_likely(txrx_peer))
				dp_tx_update_peer_basic_stats(txrx_peer,
							      desc->length,
							      desc->tx_status,
							      false);
			qdf_assert(pdev);
			dp_tx_outstanding_dec(pdev);
			/*
			 * Calling a QDF wrapper here would create a
			 * significant performance impact, so the wrapper
			 * call is avoided here.
			 */
			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
					       desc->id, DP_TX_COMP_UNMAP);
			dp_tx_nbuf_unmap(soc, desc);
			dp_tx_nbuf_dev_queue_free(&h, desc);
			dp_tx_desc_free(soc, desc, desc->pool_id);
			desc = next;
			continue;
		}

		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
					     ring_id);

		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);

		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
	dp_tx_nbuf_dev_kfree_list(&h);

	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
}
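
/**
 * dp_tx_comp_loop_pkt_limit_hit() - Check if the completion reap loop has
 *				     hit its per-iteration packet limit
 * @soc: core txrx main context
 * @num_reaped: number of descriptors reaped so far
 * @max_reap_limit: maximum descriptors to reap per loop
 *
 * Return: true when the limit is hit (only when
 * WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is enabled), false otherwise
 */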
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	bool limit_hit = false;

	limit_hit =
		(num_reaped >= max_reap_limit) ? true : false;

	if (limit_hit)
		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}

static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	return cfg->tx_comp_loop_pkt_limit;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
{
	return 0;
}
#endif
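
/**
 * dp_srng_test_and_update_nf_params() - Check the ring near-full condition
 *					 and update the reap limit
 * @soc: core txrx main context
 * @dp_srng: Tx completion ring
 * @max_reap_limit: reap limit, updated for near-full processing
 *
 * Return: non-zero when the ring is near full, 0 otherwise
 */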
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
static inline int
dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
				  int *max_reap_limit)
{
	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
							       max_reap_limit);
}
#else
static inline int
dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
				  int *max_reap_limit)
{
	return 0;
}
#endif

#ifdef DP_TX_TRACKING
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
	}
}
#endif

#ifndef WLAN_SOFTUMAC_SUPPORT
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			    uint32_t quota)
{
	void *tx_comp_hal_desc;
	void *last_prefetched_hw_desc = NULL;
	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
	hal_soc_handle_t hal_soc;
	uint8_t buffer_src;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed = 0;
	uint32_t count;
	uint32_t num_avail_for_reap = 0;
	bool force_break = false;
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	int max_reap_limit, ring_near_full;
	uint32_t num_entries;

	DP_HIST_INIT();

	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);

more_data:
	hal_soc = soc->hal_soc;
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;
	count = 0;
	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);

	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
							   &max_reap_limit);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		return 0;
	}

	if (!num_avail_for_reap)
		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
							    hal_ring_hdl, 0);

	if (num_avail_for_reap >= quota)
		num_avail_for_reap = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
							hal_ring_hdl,
							num_avail_for_reap);

	/* Find head descriptor from completion ring */
	while (qdf_likely(num_avail_for_reap--)) {
		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;
		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
							   tx_comp_hal_desc);
		/*
		 * If this buffer was not released by TQM or FW,
		 * then it is not a Tx completion indication; assert
		 */
		if (qdf_unlikely(buffer_src !=
				 HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (qdf_unlikely(buffer_src !=
				  HAL_TX_COMP_RELEASE_SOURCE_FW))) {
			uint8_t wbm_internal_error;

			dp_err_rl(
				"Tx comp release_src != TQM | FW but from %d",
				buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);

			/* When WBM sees NULL buffer_addr_info in any of
			 * ingress rings it sends an error indication,
			 * with wbm_internal_error=1, to a specific ring.
			 * The WBM2SW ring used to indicate these errors is
			 * fixed in HW, and that ring is being used as Tx
			 * completion ring. These errors are not related to
			 * Tx completions, and should just be ignored
			 */
			wbm_internal_error = hal_get_wbm_internal_error(
							hal_soc,
							tx_comp_hal_desc);

			if (wbm_internal_error) {
				dp_err_rl("Tx comp wbm_internal_error!!");
				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
								buffer_src)
					dp_handle_wbm_internal_error(
						soc,
						tx_comp_hal_desc,
						hal_tx_comp_get_buffer_type(
							tx_comp_hal_desc));

			} else {
				dp_err_rl("Tx comp wbm_internal_error false");
				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
			}
			continue;
		}

		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
							       tx_comp_hal_desc,
							       &tx_desc);
		if (qdf_unlikely(!tx_desc)) {
			dp_err("unable to retrieve tx_desc!");
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
			QDF_BUG(0);
			continue;
		}

		tx_desc->buffer_src = buffer_src;

		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
			goto add_to_pool2;

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
						 htt_tx_status);
			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
			soc->arch_ops.dp_tx_process_htt_completion(
							soc,
							tx_desc,
							htt_tx_status,
							ring_id);
		} else {
			tx_desc->tx_status =
				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
			tx_desc->buffer_src = buffer_src;
			/*
			 * If fast completion mode is enabled, the extended
			 * metadata from the descriptor is not copied
			 */
			if (qdf_likely(tx_desc->flags &
				       DP_TX_DESC_FLAG_SIMPLE))
				goto add_to_pool;

			/*
			 * If the descriptor is already freed in vdev_detach,
			 * continue to next descriptor
			 */
			if (qdf_unlikely
			    ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
			     !tx_desc->flags)) {
				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
						   tx_desc->id);
				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
				dp_tx_desc_check_corruption(tx_desc);
				continue;
			}

			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
				dp_tx_comp_info_rl("pdev in down state %d",
						   tx_desc->id);
				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
				dp_tx_comp_free_buf(soc, tx_desc, false);
				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
				goto next_desc;
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
						 tx_desc->flags, tx_desc->id);
				qdf_assert_always(0);
			}

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
add_to_pool:
			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

add_to_pool2:
			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;
		}
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
		/*
		 * Stop processing once the processed packet count
		 * exceeds the given quota
		 */
		count++;
		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
					       num_avail_for_reap,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
			break;
	}

	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
	/*
	 * If we are processing in near-full condition, there are three
	 * scenarios:
	 * 1) Ring entries have reached the critical state
	 * 2) Ring entries are still near the high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full)
		goto more_data;

	if (dp_tx_comp_enable_eol_data_check(soc)) {

		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;

			num_avail_for_reap =
				hal_srng_dst_num_valid_locked(soc->hal_soc,
							      hal_ring_hdl,
							      true);
			if (qdf_unlikely(num_entries &&
					 (num_avail_for_reap >=
					  num_entries >> 1))) {
				DP_STATS_INC(soc, tx.near_full, 1);
				goto more_data;
			}
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
#endif

#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif

QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;

	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				    DP_TCL_METADATA_TYPE_VDEV_BASED);

	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
				       vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif

void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
		vdev->search_type = soc->sta_mode_search_policy;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
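
/**
 * dp_is_tx_desc_flush_match() - Check whether a Tx descriptor matches the
 *				 flush target
 * @pdev: datapath pdev handle
 * @vdev: datapath vdev handle, may be NULL
 * @tx_desc: software Tx descriptor
 *
 * Return: true when the allocated descriptor belongs to @vdev, or to @pdev
 * when @vdev is NULL
 */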
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If vdev is given, then only check whether the desc's vdev
	 * matches. If vdev is NULL, then check whether the desc's pdev
	 * matches.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;
		/*
		 * Add flow pool lock protection in case the pool is freed
		 * because all tx_desc are recycled while handling TX
		 * completion. This is not necessary for a force flush as:
		 * a. a double lock will happen if dp_tx_desc_release is
		 *    also trying to acquire it.
		 * b. the dp interrupt has been disabled before doing the
		 *    force TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free TX desc if force free is
				 * required, otherwise only reset vdev
				 * in this TX desc.
				 */
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc,
							    false);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc,
							    false);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}
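
/*
 * Static pool helpers: with QCA_LL_TX_FLOW_CONTROL_V2 the Tx descriptor
 * pools are created dynamically by the flow control module, so only the
 * per-pool flow_pool_lock and pool status are set up here; otherwise the
 * software Tx descriptor pools are allocated and initialized up front.
 */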
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   uint32_t num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

#ifndef WLAN_SOFTUMAC_SUPPORT
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}

void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}
#else
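/*
 * Soft UMAC builds handle only the static Tx descriptor pools; the
 * ext/TSO pool hooks used above are not compiled in for these targets.
 */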
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_delete_static_pools(soc, num_pool);
}

void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_deinit_static_pools(soc, num_pool);
}
#endif /* WLAN_SOFTUMAC_SUPPORT */

/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *	   QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
						uint8_t num_pool,
						uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *	   QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					       uint8_t num_pool,
					       uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

#ifndef WLAN_SOFTUMAC_SUPPORT
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

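	/* Unwind in the reverse order of allocation */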
fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}

QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
#else
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		return QDF_STATUS_E_RESOURCES;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		return QDF_STATUS_E_RESOURCES;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		return QDF_STATUS_E_RESOURCES;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SOFTUMAC_SUPPORT */
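
/**
 * dp_tso_soc_attach() - TSO attach handler
 * @txrx_soc: opaque cdp soc handle
 *
 * Allocate and initialize the TSO descriptor and TSO num-seg pools.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */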
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
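
/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: opaque cdp soc handle
 *
 * De-initialize and free the TSO descriptor and TSO num-seg pools.
 *
 * Return: QDF_STATUS_SUCCESS always
 */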
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
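/**
 * dp_pkt_add_timestamp() - add a TSF timestamp into the packet payload
 * @vdev: DP vdev handle
 * @index: index deciding the offset in the payload
 * @time: host timestamp to convert to TSF time
 * @nbuf: network buffer
 */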
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
		uint64_t tsf_time;

		if (vdev->get_tsf_time) {
			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
		}
	}
}

void dp_pkt_get_timestamp(uint64_t *time)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
		*time = qdf_get_log_timestamp();
}
#endif /* CONFIG_DP_PKT_ADD_TIMESTAMP */

#ifdef QCA_MULTIPASS_SUPPORT
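/**
 * dp_tx_add_groupkey_metadata() - add group key index into HTT TX metadata
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be set up in the MSDU descriptor
 * @group_key: group key index programmed into the metadata
 */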
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint16_t group_key)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	/*
	 * When sending a multicast packet with multiple passphrases, the
	 * host adds HTT EXT metadata (struct htt_tx_msdu_desc_ext2_t, see
	 * htt.h): set "valid_key_flags" to 1 and program the group key
	 * index into the "key_flags" field ("key_flags = group_key_ix").
	 */
	HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info->meta_data[0],
						       1);
	HTT_TX_MSDU_EXT2_DESC_KEY_FLAGS_SET(msdu_info->meta_data[2], group_key);
}

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
/**
 * dp_tx_need_mcast_reinject() - check whether the frame must take the
 *				 reinject path
 * @vdev: DP vdev handle
 *
 * Return: true if reinject handling is required, else false
 */
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
	if (vdev->mlo_vdev && vdev->opmode == wlan_op_mode_ap)
		return true;

	return false;
}
#else
static inline bool
dp_tx_need_mcast_reinject(struct dp_vdev *vdev)
{
	return false;
}
#endif

/**
 * dp_tx_need_multipass_process() - check whether the frame needs
 *				    multi-passphrase processing
 * @soc: dp soc handle
 * @vdev: DP vdev handle
 * @buf: frame
 * @vlan_id: vlan id of frame
 *
 * Return: DP_VLAN_UNTAGGED, DP_VLAN_TAGGED_MULTICAST or
 *	   DP_VLAN_TAGGED_UNICAST
 */
static
uint8_t dp_tx_need_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
				     qdf_nbuf_t buf, uint16_t *vlan_id)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *peer = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
	struct vlan_ethhdr *veh = NULL;
	bool not_vlan = ((vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
			 (htons(eh->ether_type) != ETH_P_8021Q));

	if (qdf_unlikely(not_vlan))
		return DP_VLAN_UNTAGGED;

	veh = (struct vlan_ethhdr *)eh;
	*vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);

	if (qdf_unlikely(DP_FRAME_IS_MULTICAST((eh)->ether_dhost))) {
		/* look for handling of multicast packets in reinject path */
		if (dp_tx_need_mcast_reinject(vdev))
			return DP_VLAN_UNTAGGED;

		qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
		TAILQ_FOREACH(txrx_peer, &vdev->mpass_peer_list,
			      mpass_peer_list_elem) {
			if (*vlan_id == txrx_peer->vlan_id) {
				qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
				return DP_VLAN_TAGGED_MULTICAST;
			}
		}
		qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);
		return DP_VLAN_UNTAGGED;
	}

	peer = dp_peer_find_hash_find(soc, eh->ether_dhost, 0, DP_VDEV_ALL,
				      DP_MOD_ID_TX_MULTIPASS);
	if (qdf_unlikely(!peer))
		return DP_VLAN_UNTAGGED;

	/*
	 * Do not drop the frame when vlan_id doesn't match.
	 * Send the frame as it is.
	 */
	if (*vlan_id == peer->txrx_peer->vlan_id) {
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
		return DP_VLAN_TAGGED_UNICAST;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
	return DP_VLAN_UNTAGGED;
}

#ifndef WLAN_REPEATER_NOT_SUPPORTED
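/**
 * dp_tx_multipass_send_pkt_to_repeater() - pass a vlan-tagged copy of the
 *					    multicast frame through to
 *					    classic repeaters
 * @soc: dp soc handle
 * @vdev: DP vdev handle
 * @nbuf: multicast frame
 * @msdu_info: MSDU info of the frame
 */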
static inline void
dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
				     qdf_nbuf_t nbuf,
				     struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t nbuf_copy = NULL;

	/* AP can have classic clients, special clients &
	 * classic repeaters.
	 * 1. Classic clients & special client:
	 *    Remove vlan header, find corresponding group key
	 *    index, fill in metaheader and enqueue multicast
	 *    frame to TCL.
	 * 2. Classic repeater:
	 *    Pass through to classic repeater with vlan tag
	 *    intact without any group key index. Hardware
	 *    will know which key to use to send frame to
	 *    repeater.
	 */
	nbuf_copy = qdf_nbuf_copy(nbuf);

	/*
	 * Send multicast frame to special peers even
	 * if pass through to classic repeater fails.
	 */
	if (nbuf_copy) {
		struct dp_tx_msdu_info_s msdu_info_copy;

		qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
		msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
		nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
						   &msdu_info_copy,
						   HTT_INVALID_PEER, NULL);
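		/* dp_tx_send_msdu_single() hands the nbuf back on failure */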
		if (nbuf_copy) {
			qdf_nbuf_free(nbuf_copy);
			dp_info_rl("nbuf_copy send failed");
		}
	}
}
#else
/* Stub so the unconditional call in dp_tx_multipass_process() still
 * compiles when repeater support is not built in.
 */
static inline void
dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
				     qdf_nbuf_t nbuf,
				     struct dp_tx_msdu_info_s *msdu_info)
{
}
#endif /* !WLAN_REPEATER_NOT_SUPPORTED */
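
/**
 * dp_tx_multipass_process() - process vlan frames in the tx path
 * @soc: dp soc handle
 * @vdev: DP vdev handle
 * @nbuf: frame
 * @msdu_info: MSDU info of the frame
 *
 * Return: false if the frame is to be dropped (no group key installed for
 *	   its vlan), true if transmission should proceed
 */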
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	uint16_t vlan_id = 0;
	uint16_t group_key = 0;
	uint8_t is_spcl_peer = DP_VLAN_UNTAGGED;

	if (HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->meta_data[0]))
		return true;

	is_spcl_peer = dp_tx_need_multipass_process(soc, vdev, nbuf, &vlan_id);

	if ((is_spcl_peer != DP_VLAN_TAGGED_MULTICAST) &&
	    (is_spcl_peer != DP_VLAN_TAGGED_UNICAST))
		return true;

	if (is_spcl_peer == DP_VLAN_TAGGED_UNICAST) {
		dp_tx_remove_vlan_tag(vdev, nbuf);
		return true;
	}

	dp_tx_multipass_send_pkt_to_repeater(soc, vdev, nbuf, msdu_info);
	group_key = vdev->iv_vlan_map[vlan_id];

	/* If the group key is not installed, drop the frame */
	if (!group_key)
		return false;

	dp_tx_remove_vlan_tag(vdev, nbuf);
	dp_tx_add_groupkey_metadata(vdev, msdu_info, group_key);
	msdu_info->exception_fw = 1;

	return true;
}
#endif /* QCA_MULTIPASS_SUPPORT */