dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "cdp_txrx_cmn_reg.h"
#ifdef CONFIG_SAWF
#include <dp_sawf.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

#define DP_RETRY_COUNT 7

#ifdef WLAN_PEER_JITTER
#define DP_AVG_JITTER_WEIGHT_DENOM 4
#define DP_AVG_DELAY_WEIGHT_DENOM 3
#endif

#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif

/* mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					  HAL_TX_ENCRYPT_TYPE_WEP_128,
					  HAL_TX_ENCRYPT_TYPE_WEP_104,
					  HAL_TX_ENCRYPT_TYPE_WEP_40,
					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					  HAL_TX_ENCRYPT_TYPE_WAPI,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

qdf_export_symbol(sec_type_map);
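
/*
 * Illustrative usage sketch (not part of the driver): the table above is
 * meant to be indexed by a cdp_sec_type value to obtain the matching HAL
 * encrypt type for the hardware descriptor, e.g.
 *
 *	uint8_t encrypt_type = sec_type_map[cdp_sec_type_aes_ccmp];
 *
 * The enumerator name is assumed here purely for illustration; callers
 * must keep the index below MAX_CDP_SEC_TYPE, since the array has exactly
 * that many entries.
 */
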
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	enum dp_tx_event_type type;

	if (flags & DP_TX_DESC_FLAG_FLUSH)
		type = DP_TX_DESC_FLUSH;
	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
		type = DP_TX_COMP_UNMAP_ERR;
	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
		type = DP_TX_COMP_UNMAP;
	else
		type = DP_TX_DESC_UNMAP;

	return type;
}
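
/*
 * Illustrative note (a reading of the logic above, not additional driver
 * code): the flag checks are ordered by priority, so a descriptor carrying
 * DP_TX_DESC_FLAG_FLUSH is always logged as DP_TX_DESC_FLUSH even if
 * completion flags are also set, a completed descriptor maps to
 * DP_TX_COMP_UNMAP (or DP_TX_COMP_UNMAP_ERR on error), and anything else
 * falls back to DP_TX_DESC_UNMAP.
 */
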
static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
	struct dp_tx_desc_event *entry;
	uint32_t idx;
	uint16_t slot;

	switch (type) {
	case DP_TX_COMP_UNMAP:
	case DP_TX_COMP_UNMAP_ERR:
	case DP_TX_COMP_MSDU_EXT:
		if (qdf_unlikely(!tx_comp_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
						 &slot,
						 DP_TX_COMP_HIST_SLOT_SHIFT,
						 DP_TX_COMP_HIST_PER_SLOT_MAX,
						 DP_TX_COMP_HISTORY_SIZE);
		entry = &tx_comp_history->entry[slot][idx];
		break;
	case DP_TX_DESC_MAP:
	case DP_TX_DESC_UNMAP:
	case DP_TX_DESC_COOKIE:
	case DP_TX_DESC_FLUSH:
		if (qdf_unlikely(!tx_tcl_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
						 &slot,
						 DP_TX_TCL_HIST_SLOT_SHIFT,
						 DP_TX_TCL_HIST_PER_SLOT_MAX,
						 DP_TX_TCL_HISTORY_SIZE);
		entry = &tx_tcl_history->entry[slot][idx];
		break;
	default:
		dp_info_rl("Invalid dp_tx_event_type: %d", type);
		return;
	}

	entry->skb = skb;
	entry->paddr = paddr;
	entry->sw_cookie = sw_cookie;
	entry->type = type;
	entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
	int i;

	for (i = 1; i < tso_seg->seg.num_frags; i++) {
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
				       skb, sw_cookie, type);
	}

	if (!tso_seg->next)
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
				       skb, 0xFFFFFFFF, type);
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
	uint32_t num_segs = tso_info.num_segs;

	while (num_segs) {
		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
		curr_seg = curr_seg->next;
		num_segs--;
	}
}
#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */

static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);

/**
 * dp_is_tput_high() - Check if throughput is high
 *
 * @soc - core txrx main context
 *
 * The current function is based on the RTPM tput policy variable where RTPM is
 * avoided based on throughput.
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
	return dp_get_rtpm_tput_policy_requirement(soc);
}
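
/*
 * Illustrative usage sketch (assumption, mirroring dp_tx_trace_pkt() later
 * in this file): this predicate is meant as a cheap gate in per-packet
 * debug paths, e.g.
 *
 *	if (dp_is_tput_high(soc))
 *		return;
 *
 * so that optional per-packet bookkeeping is skipped while the RTPM policy
 * reports a high-throughput state.
 */
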
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;
		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
		dp_tx_err("TSO desc is NULL!");
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
		dp_tx_err("TSO num desc is NULL!");
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->
			msdu_ext_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->msdu_ext_desc->
					    tso_num_desc);
			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
				    tso_desc);
		tx_desc->msdu_ext_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc : Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return:
 */
void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->msdu_ext_desc) {
		if (tx_desc->frm_type == dp_tx_frm_tso)
			dp_tx_tso_desc_release(soc, tx_desc);

		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
			dp_tx_me_free_buf(tx_desc->pdev,
					  tx_desc->msdu_ext_desc->me_buffer);

		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
	}

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    tx_desc->buffer_src)
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
		    tx_desc->id, comp_status,
		    qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;
	/* Size rounded off to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0]) ||
	    msdu_info->exception_fw) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does, so no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}

		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			dp_tx_err("Error in filling HTT metadata");

			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
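
/*
 * Illustrative note (example value is an assumption): the alignment above
 * is the usual round-up-to-8 idiom; e.g. for a 37-byte extension
 * descriptor,
 *
 *	htt_desc_size_aligned = (37 + 7) & ~0x7 = 40
 *
 * so the pre-header pushed in front of the frame always ends on an 8-byte
 * boundary, which is also the amount of headroom requested from
 * qdf_nbuf_realloc_headroom() when the skb is short on space.
 */
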
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif
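
/*
 * Illustrative sketch (assumption about qdf_dmaaddr_to_32s(), not new
 * driver code): the helper used above splits each fragment's DMA address
 * into the two 32-bit halves the hardware descriptor expects,
 * conceptually:
 *
 *	lo = (uint32_t)(paddr & 0xffffffff);
 *	hi = (uint32_t)(paddr >> 32);
 *
 * which hal_tx_ext_desc_set_buffer() then programs as the fragment's
 * buffer address low/high words.
 */
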
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev - pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
						% CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
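
/*
 * Illustrative example (the CDP_MAX_TSO_PACKETS value below is an
 * assumption): dp_tso_get_stats_idx() hands out ids that wrap around the
 * per-pdev TSO stats array. With CDP_MAX_TSO_PACKETS == 16, successive
 * calls return 1, 2, ..., 15, 0, 1, ... because qdf_atomic_inc_return()
 * increments before returning, so the very first id is 1 % 16 = 1.
 */
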
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
					tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso num segment descriptors; the list normally has only one */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptor */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);

		return QDF_STATUS_E_INVAL;
	}

	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
			      msdu, 0, DP_TX_DESC_MAP);

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
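
/*
 * Illustrative sketch (restating the allocation loop above, not new driver
 * code): dp_tx_prepare_tso() builds the segment list by prepending each
 * newly allocated element, the classic singly-linked push:
 *
 *	tso_seg->next = tso_info->tso_seg_list;
 *	tso_info->tso_seg_list = tso_seg;
 *
 * The elements are only empty placeholders at this point; their contents
 * are filled in later by qdf_nbuf_get_tso_info(), so the list order at
 * allocation time does not matter for correctness.
 */
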
QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return:
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;

		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @soc: DP soc handle
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	if (dp_is_tput_high(soc))
		return;

	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Returns: 1 if the packet is marked as exception,
 *          0 if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return 0;
}
#endif

#ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
 *                                          as indication to fw to inform that
 *                                          data stream has ended
 * @vdev: DP vdev handle
 * @nbuf: original buffer from network stack
 *
 * Return: NULL on failure,
 *         nbuf on success
 */
static inline qdf_nbuf_t
dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
				     qdf_nbuf_t nbuf)
{
	/* Packet length should be enough to copy up to the L3 header */
	uint8_t end_nbuf_len = 64;
	uint8_t htt_desc_size_aligned;
	uint8_t htt_desc_size;
	qdf_nbuf_t end_nbuf;

	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
		if (!end_nbuf) {
			end_nbuf = qdf_nbuf_alloc(NULL,
						  (htt_desc_size_aligned +
						   end_nbuf_len),
						  htt_desc_size_aligned,
						  8, false);
			if (!end_nbuf) {
				dp_err("Packet allocation failed");
				goto out;
			}
		} else {
			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
		}
		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
			     end_nbuf_len);
		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
		return end_nbuf;
	}
out:
	return NULL;
}
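
/*
 * Illustrative note (assumption about the intended layout): the
 * end-indication nbuf is allocated so that the HTT meta header can later
 * be pushed in front of the copied L2/L3 bytes, roughly
 *
 *	total size = htt_desc_size_aligned (headroom) + end_nbuf_len (data)
 *
 * and a buffer recycled from vdev->end_ind_pkt_q is simply reset to the
 * same headroom instead of being reallocated.
 */
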
  845. /**
  846. * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
  847. * via exception path.
  848. * @vdev: DP vdev handle
  849. * @end_nbuf: skb to send as indication
  850. * @msdu_info: msdu_info of original nbuf
  851. * @peer_id: peer id
  852. *
  853. * Return: None
  854. */
  855. static inline void
  856. dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
  857. qdf_nbuf_t end_nbuf,
  858. struct dp_tx_msdu_info_s *msdu_info,
  859. uint16_t peer_id)
  860. {
  861. struct dp_tx_msdu_info_s e_msdu_info = {0};
  862. qdf_nbuf_t nbuf;
  863. struct htt_tx_msdu_desc_ext2_t *desc_ext =
  864. (struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
  865. e_msdu_info.tx_queue = msdu_info->tx_queue;
  866. e_msdu_info.tid = msdu_info->tid;
  867. e_msdu_info.exception_fw = 1;
  868. desc_ext->host_tx_desc_pool = 1;
  869. desc_ext->traffic_end_indication = 1;
  870. nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
  871. peer_id, NULL);
  872. if (nbuf) {
  873. dp_err("Traffic end indication packet tx failed");
  874. qdf_nbuf_free(nbuf);
  875. }
  876. }
  877. /**
* dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
* mark it as a traffic end
* indication packet.
  881. * @tx_desc: Tx descriptor pointer
  882. * @msdu_info: msdu_info structure pointer
  883. *
  884. * Return: None
  885. */
  886. static inline void
  887. dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
  888. struct dp_tx_msdu_info_s *msdu_info)
  889. {
  890. struct htt_tx_msdu_desc_ext2_t *desc_ext =
  891. (struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
  892. if (qdf_unlikely(desc_ext->traffic_end_indication))
  893. tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
  894. }
  895. /**
* dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet, instead of
* freeing it, when it is associated
* with a descriptor flagged for
* traffic end indication.
  900. * @soc: dp soc handle
  901. * @desc: Tx descriptor pointer
  902. * @nbuf: buffer pointer
  903. *
  904. * Return: True if packet gets enqueued else false
  905. */
  906. static bool
  907. dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
  908. struct dp_tx_desc_s *desc,
  909. qdf_nbuf_t nbuf)
  910. {
  911. struct dp_vdev *vdev = NULL;
  912. if (qdf_unlikely((desc->flags &
  913. DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
  914. vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
  915. DP_MOD_ID_TX_COMP);
  916. if (vdev) {
  917. qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
  918. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
  919. return true;
  920. }
  921. }
  922. return false;
  923. }
  924. /**
* dp_tx_traffic_end_indication_is_enabled() - Get the feature
* enable/disable status
* @vdev: dp vdev handle
*
* Return: true if the feature is enabled, else false
  930. */
  931. static inline bool
  932. dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
  933. {
  934. return qdf_unlikely(vdev->traffic_end_ind_en);
  935. }
  936. static inline qdf_nbuf_t
  937. dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  938. struct dp_tx_msdu_info_s *msdu_info,
  939. uint16_t peer_id, qdf_nbuf_t end_nbuf)
  940. {
  941. if (dp_tx_traffic_end_indication_is_enabled(vdev))
  942. end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
  943. nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
  944. if (qdf_unlikely(end_nbuf))
  945. dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
  946. msdu_info, peer_id);
  947. return nbuf;
  948. }
  949. #else
  950. static inline qdf_nbuf_t
  951. dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
  952. qdf_nbuf_t nbuf)
  953. {
  954. return NULL;
  955. }
  956. static inline void
  957. dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
  958. qdf_nbuf_t end_nbuf,
  959. struct dp_tx_msdu_info_s *msdu_info,
  960. uint16_t peer_id)
  961. {}
  962. static inline void
  963. dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
  964. struct dp_tx_msdu_info_s *msdu_info)
  965. {}
  966. static inline bool
  967. dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
  968. struct dp_tx_desc_s *desc,
  969. qdf_nbuf_t nbuf)
  970. {
  971. return false;
  972. }
  973. static inline bool
  974. dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
  975. {
  976. return false;
  977. }
  978. static inline qdf_nbuf_t
  979. dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  980. struct dp_tx_msdu_info_s *msdu_info,
  981. uint16_t peer_id, qdf_nbuf_t end_nbuf)
  982. {
  983. return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
  984. }
  985. #endif
  986. /**
* dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
* @vdev: DP vdev handle
* @nbuf: skb
* @desc_pool_id: Descriptor pool ID
* @msdu_info: Info to be setup in MSDU descriptor
* @tx_exc_metadata: Handle that holds exception path metadata
*
* Allocate and prepare Tx descriptor with msdu information.
*
  995. * Return: Pointer to Tx Descriptor on success,
  996. * NULL on failure
  997. */
  998. static
  999. struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
  1000. qdf_nbuf_t nbuf, uint8_t desc_pool_id,
  1001. struct dp_tx_msdu_info_s *msdu_info,
  1002. struct cdp_tx_exception_metadata *tx_exc_metadata)
  1003. {
  1004. uint8_t align_pad;
  1005. uint8_t is_exception = 0;
  1006. uint8_t htt_hdr_size;
  1007. struct dp_tx_desc_s *tx_desc;
  1008. struct dp_pdev *pdev = vdev->pdev;
  1009. struct dp_soc *soc = pdev->soc;
  1010. if (dp_tx_limit_check(vdev))
  1011. return NULL;
  1012. /* Allocate software Tx descriptor */
  1013. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1014. if (qdf_unlikely(!tx_desc)) {
  1015. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1016. DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
  1017. return NULL;
  1018. }
  1019. dp_tx_outstanding_inc(pdev);
  1020. /* Initialize the SW tx descriptor */
  1021. tx_desc->nbuf = nbuf;
  1022. tx_desc->frm_type = dp_tx_frm_std;
  1023. tx_desc->tx_encap_type = ((tx_exc_metadata &&
  1024. (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
  1025. tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
  1026. tx_desc->vdev_id = vdev->vdev_id;
  1027. tx_desc->pdev = pdev;
  1028. tx_desc->msdu_ext_desc = NULL;
  1029. tx_desc->pkt_offset = 0;
  1030. tx_desc->length = qdf_nbuf_headlen(nbuf);
  1031. tx_desc->shinfo_addr = skb_end_pointer(nbuf);
  1032. dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
  1033. if (qdf_unlikely(vdev->multipass_en)) {
  1034. if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
  1035. goto failure;
  1036. }
  1037. /* Packets marked by upper layer (OS-IF) to be sent to FW */
  1038. if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
  1039. is_exception = 1;
  1040. /*
  1041. * For special modes (vdev_type == ocb or mesh), data frames should be
  1042. * transmitted using varying transmit parameters (tx spec) which include
* transmit rate, power, priority, channel, channel bandwidth, nss etc.
  1044. * These are filled in HTT MSDU descriptor and sent in frame pre-header.
  1045. * These frames are sent as exception packets to firmware.
  1046. *
  1047. * HW requirement is that metadata should always point to a
  1048. * 8-byte aligned address. So we add alignment pad to start of buffer.
  1049. * HTT Metadata should be ensured to be multiple of 8-bytes,
  1050. * to get 8-byte aligned start address along with align_pad added
  1051. *
  1052. * |-----------------------------|
  1053. * | |
  1054. * |-----------------------------| <-----Buffer Pointer Address given
  1055. * | | ^ in HW descriptor (aligned)
  1056. * | HTT Metadata | |
  1057. * | | |
  1058. * | | | Packet Offset given in descriptor
  1059. * | | |
  1060. * |-----------------------------| |
  1061. * | Alignment Pad | v
  1062. * |-----------------------------| <----- Actual buffer start address
  1063. * | SKB Data | (Unaligned)
  1064. * | |
  1065. * | |
  1066. * | |
  1067. * | |
  1068. * | |
  1069. * |-----------------------------|
  1070. */
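/*
 * Worked example (illustrative numbers only): if qdf_nbuf_data() ends
 * in 0x5, align_pad = 0x5 & 0x7 = 5. Pushing 5 bytes makes the head
 * 8-byte aligned; the HTT metadata prepended next is a multiple of
 * 8 bytes, so the buffer pointer given to HW stays aligned and
 * pkt_offset = align_pad + htt_hdr_size.
 */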
  1071. if (qdf_unlikely((msdu_info->exception_fw)) ||
  1072. (vdev->opmode == wlan_op_mode_ocb) ||
  1073. (tx_exc_metadata &&
  1074. tx_exc_metadata->is_tx_sniffer)) {
  1075. align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
  1076. if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
  1077. DP_STATS_INC(vdev,
  1078. tx_i.dropped.headroom_insufficient, 1);
  1079. goto failure;
  1080. }
  1081. if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
  1082. dp_tx_err("qdf_nbuf_push_head failed");
  1083. goto failure;
  1084. }
  1085. htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
  1086. msdu_info);
  1087. if (htt_hdr_size == 0)
  1088. goto failure;
  1089. tx_desc->length = qdf_nbuf_headlen(nbuf);
  1090. tx_desc->pkt_offset = align_pad + htt_hdr_size;
  1091. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1092. dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
  1093. msdu_info);
  1094. is_exception = 1;
  1095. tx_desc->length -= tx_desc->pkt_offset;
  1096. }
  1097. #if !TQM_BYPASS_WAR
  1098. if (is_exception || tx_exc_metadata)
  1099. #endif
  1100. {
  1101. /* Temporary WAR due to TQM VP issues */
  1102. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1103. qdf_atomic_inc(&soc->num_tx_exception);
  1104. }
  1105. return tx_desc;
  1106. failure:
  1107. dp_tx_desc_release(tx_desc, desc_pool_id);
  1108. return NULL;
  1109. }
  1110. /**
  1111. * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
  1112. * @vdev: DP vdev handle
  1113. * @nbuf: skb
  1114. * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
  1115. * @desc_pool_id : Descriptor Pool ID
  1116. *
* Allocate and prepare Tx descriptor with msdu and fragment descriptor
  1118. * information. For frames with fragments, allocate and prepare
  1119. * an MSDU extension descriptor
  1120. *
  1121. * Return: Pointer to Tx Descriptor on success,
  1122. * NULL on failure
  1123. */
  1124. static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
  1125. qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
  1126. uint8_t desc_pool_id)
  1127. {
  1128. struct dp_tx_desc_s *tx_desc;
  1129. struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
  1130. struct dp_pdev *pdev = vdev->pdev;
  1131. struct dp_soc *soc = pdev->soc;
  1132. if (dp_tx_limit_check(vdev))
  1133. return NULL;
  1134. /* Allocate software Tx descriptor */
  1135. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1136. if (!tx_desc) {
  1137. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1138. return NULL;
  1139. }
  1140. dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
  1141. nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
  1142. dp_tx_outstanding_inc(pdev);
  1143. /* Initialize the SW tx descriptor */
  1144. tx_desc->nbuf = nbuf;
  1145. tx_desc->frm_type = msdu_info->frm_type;
  1146. tx_desc->tx_encap_type = vdev->tx_encap_type;
  1147. tx_desc->vdev_id = vdev->vdev_id;
  1148. tx_desc->pdev = pdev;
  1149. tx_desc->pkt_offset = 0;
  1150. dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
  1151. /* Handle scattered frames - TSO/SG/ME */
  1152. /* Allocate and prepare an extension descriptor for scattered frames */
  1153. msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
  1154. if (!msdu_ext_desc) {
  1155. dp_tx_info("Tx Extension Descriptor Alloc Fail");
  1156. goto failure;
  1157. }
  1158. #if TQM_BYPASS_WAR
  1159. /* Temporary WAR due to TQM VP issues */
  1160. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1161. qdf_atomic_inc(&soc->num_tx_exception);
  1162. #endif
  1163. if (qdf_unlikely(msdu_info->exception_fw))
  1164. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1165. tx_desc->msdu_ext_desc = msdu_ext_desc;
  1166. tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
  1167. msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
  1168. msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
  1169. tx_desc->dma_addr = msdu_ext_desc->paddr;
  1170. if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
  1171. tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
  1172. else
  1173. tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
  1174. return tx_desc;
  1175. failure:
  1176. dp_tx_desc_release(tx_desc, desc_pool_id);
  1177. return NULL;
  1178. }
  1179. /**
  1180. * dp_tx_prepare_raw() - Prepare RAW packet TX
  1181. * @vdev: DP vdev handle
  1182. * @nbuf: buffer pointer
  1183. * @seg_info: Pointer to Segment info Descriptor to be prepared
  1184. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
  1185. * descriptor
  1186. *
  1187. * Return:
  1188. */
  1189. static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1190. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  1191. {
  1192. qdf_nbuf_t curr_nbuf = NULL;
  1193. uint16_t total_len = 0;
  1194. qdf_dma_addr_t paddr;
  1195. int32_t i;
  1196. int32_t mapped_buf_num = 0;
  1197. struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
  1198. qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  1199. DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
  1200. /* Continue only if frames are of DATA type */
  1201. if (!DP_FRAME_IS_DATA(qos_wh)) {
  1202. DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
  1203. dp_tx_debug("Pkt. recd is of not data type");
  1204. goto error;
  1205. }
  1206. /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
  1207. if (vdev->raw_mode_war &&
  1208. (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
  1209. (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
  1210. qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
  1211. for (curr_nbuf = nbuf, i = 0; curr_nbuf;
  1212. curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
  1213. /*
  1214. * Number of nbuf's must not exceed the size of the frags
  1215. * array in seg_info.
  1216. */
  1217. if (i >= DP_TX_MAX_NUM_FRAGS) {
  1218. dp_err_rl("nbuf cnt exceeds the max number of segs");
  1219. DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
  1220. goto error;
  1221. }
  1222. if (QDF_STATUS_SUCCESS !=
  1223. qdf_nbuf_map_nbytes_single(vdev->osdev,
  1224. curr_nbuf,
  1225. QDF_DMA_TO_DEVICE,
  1226. curr_nbuf->len)) {
  1227. dp_tx_err("%s dma map error ", __func__);
  1228. DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
  1229. goto error;
  1230. }
  1231. /* Update the count of mapped nbuf's */
  1232. mapped_buf_num++;
  1233. paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
  1234. seg_info->frags[i].paddr_lo = paddr;
  1235. seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
  1236. seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
  1237. seg_info->frags[i].vaddr = (void *) curr_nbuf;
  1238. total_len += qdf_nbuf_len(curr_nbuf);
  1239. }
  1240. seg_info->frag_cnt = i;
  1241. seg_info->total_len = total_len;
  1242. seg_info->next = NULL;
  1243. sg_info->curr_seg = seg_info;
  1244. msdu_info->frm_type = dp_tx_frm_raw;
  1245. msdu_info->num_seg = 1;
  1246. return nbuf;
  1247. error:
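/* Unmap only the nbufs that were successfully mapped, then free the whole chain */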
  1248. i = 0;
  1249. while (nbuf) {
  1250. curr_nbuf = nbuf;
  1251. if (i < mapped_buf_num) {
  1252. qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
  1253. QDF_DMA_TO_DEVICE,
  1254. curr_nbuf->len);
  1255. i++;
  1256. }
  1257. nbuf = qdf_nbuf_next(nbuf);
  1258. qdf_nbuf_free(curr_nbuf);
  1259. }
  1260. return NULL;
  1261. }
  1262. /**
  1263. * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
  1264. * @soc: DP soc handle
  1265. * @nbuf: Buffer pointer
  1266. *
  1267. * unmap the chain of nbufs that belong to this RAW frame.
  1268. *
  1269. * Return: None
  1270. */
  1271. static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
  1272. qdf_nbuf_t nbuf)
  1273. {
  1274. qdf_nbuf_t cur_nbuf = nbuf;
  1275. do {
  1276. qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
  1277. QDF_DMA_TO_DEVICE,
  1278. cur_nbuf->len);
  1279. cur_nbuf = qdf_nbuf_next(cur_nbuf);
  1280. } while (cur_nbuf);
  1281. }
  1282. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1283. void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
  1284. qdf_nbuf_t nbuf)
  1285. {
  1286. qdf_nbuf_t nbuf_local;
  1287. struct dp_vdev *vdev_local = vdev_hdl;
  1288. do {
  1289. if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
  1290. break;
  1291. nbuf_local = nbuf;
  1292. if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
  1293. htt_cmn_pkt_type_raw))
  1294. break;
  1295. else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
  1296. break;
  1297. else if (qdf_nbuf_is_tso((nbuf_local)))
  1298. break;
  1299. dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
  1300. (nbuf_local),
  1301. NULL, 1, 0);
  1302. } while (0);
  1303. }
  1304. #endif
  1305. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  1306. /**
  1307. * dp_tx_update_stats() - Update soc level tx stats
  1308. * @soc: DP soc handle
  1309. * @tx_desc: TX descriptor reference
  1310. * @ring_id: TCL ring id
  1311. *
  1312. * Returns: none
  1313. */
  1314. void dp_tx_update_stats(struct dp_soc *soc,
  1315. struct dp_tx_desc_s *tx_desc,
  1316. uint8_t ring_id)
  1317. {
  1318. uint32_t stats_len = 0;
  1319. if (tx_desc->frm_type == dp_tx_frm_tso)
  1320. stats_len = tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
  1321. else
  1322. stats_len = qdf_nbuf_len(tx_desc->nbuf);
  1323. DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
  1324. }
  1325. int
  1326. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1327. struct dp_tx_desc_s *tx_desc,
  1328. uint8_t tid,
  1329. struct dp_tx_msdu_info_s *msdu_info,
  1330. uint8_t ring_id)
  1331. {
  1332. struct dp_swlm *swlm = &soc->swlm;
  1333. union swlm_data swlm_query_data;
  1334. struct dp_swlm_tcl_data tcl_data;
  1335. QDF_STATUS status;
  1336. int ret;
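/*
 * When the SW latency manager is disabled, fall back to the
 * per-segment skip_hp_update decision already recorded in msdu_info
 * (see dp_tx_is_hp_update_required()).
 */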
  1337. if (!swlm->is_enabled)
  1338. return msdu_info->skip_hp_update;
  1339. tcl_data.nbuf = tx_desc->nbuf;
  1340. tcl_data.tid = tid;
  1341. tcl_data.ring_id = ring_id;
  1342. if (tx_desc->frm_type == dp_tx_frm_tso) {
  1343. tcl_data.pkt_len =
  1344. tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
  1345. } else {
  1346. tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf);
  1347. }
  1348. tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
  1349. swlm_query_data.tcl_data = &tcl_data;
  1350. status = dp_swlm_tcl_pre_check(soc, &tcl_data);
  1351. if (QDF_IS_STATUS_ERROR(status)) {
  1352. dp_swlm_tcl_reset_session_data(soc, ring_id);
  1353. DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
  1354. return 0;
  1355. }
  1356. ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
  1357. if (ret) {
  1358. DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
  1359. } else {
  1360. DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
  1361. }
  1362. return ret;
  1363. }
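/*
 * Illustrative call flow (caller names assumed, not part of this file's
 * contract): the enqueue path computes
 *   coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *                                       msdu_info, ring_id);
 * and later hands it to dp_tx_ring_access_end_wrapper(), so a coalesced
 * write only reaps the ring instead of updating the head pointer.
 */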
  1364. void
  1365. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1366. int coalesce)
  1367. {
  1368. if (coalesce)
  1369. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1370. else
  1371. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1372. }
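/*
 * Skip the TCL head pointer update for every segment except the last
 * one of a multi-segment MSDU, so the ring is typically kicked only
 * once for the whole burst.
 */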
  1373. static inline void
  1374. dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
  1375. {
  1376. if (((i + 1) < msdu_info->num_seg))
  1377. msdu_info->skip_hp_update = 1;
  1378. else
  1379. msdu_info->skip_hp_update = 0;
  1380. }
  1381. static inline void
  1382. dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
  1383. {
  1384. hal_ring_handle_t hal_ring_hdl =
  1385. dp_tx_get_hal_ring_hdl(soc, ring_id);
  1386. if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
  1387. dp_err("Fillmore: SRNG access start failed");
  1388. return;
  1389. }
  1390. dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
  1391. }
  1392. static inline void
  1393. dp_tx_check_and_flush_hp(struct dp_soc *soc,
  1394. QDF_STATUS status,
  1395. struct dp_tx_msdu_info_s *msdu_info)
  1396. {
  1397. if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
  1398. dp_flush_tcp_hp(soc,
  1399. (msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
  1400. }
  1401. }
  1402. #else
  1403. static inline void
  1404. dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
  1405. {
  1406. }
  1407. static inline void
  1408. dp_tx_check_and_flush_hp(struct dp_soc *soc,
  1409. QDF_STATUS status,
  1410. struct dp_tx_msdu_info_s *msdu_info)
  1411. {
  1412. }
  1413. #endif
  1414. #ifdef FEATURE_RUNTIME_PM
  1415. static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
  1416. {
  1417. int ret;
  1418. ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
  1419. (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
  1420. return ret;
  1421. }
  1422. /**
  1423. * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
  1424. * @soc: Datapath soc handle
  1425. * @hal_ring_hdl: HAL ring handle
  1426. * @coalesce: Coalesce the current write or not
  1427. *
  1428. * Wrapper for HAL ring access end for data transmission for
  1429. * FEATURE_RUNTIME_PM
  1430. *
  1431. * Returns: none
  1432. */
  1433. void
  1434. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1435. hal_ring_handle_t hal_ring_hdl,
  1436. int coalesce)
  1437. {
  1438. int ret;
  1439. /*
  1440. * Avoid runtime get and put APIs under high throughput scenarios.
  1441. */
  1442. if (dp_get_rtpm_tput_policy_requirement(soc)) {
  1443. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1444. return;
  1445. }
  1446. ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
  1447. if (QDF_IS_STATUS_SUCCESS(ret)) {
  1448. if (hif_system_pm_state_check(soc->hif_handle)) {
  1449. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1450. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1451. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1452. } else {
  1453. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1454. }
  1455. hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
  1456. } else {
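/*
 * Runtime get failed: reap the ring without touching the HW head
 * pointer, mark the SRNG for a deferred flush and account the
 * pending write so it can be flushed later.
 */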
  1457. dp_runtime_get(soc);
  1458. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1459. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1460. qdf_atomic_inc(&soc->tx_pending_rtpm);
  1461. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1462. dp_runtime_put(soc);
  1463. }
  1464. }
  1465. #else
  1466. #ifdef DP_POWER_SAVE
  1467. void
  1468. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1469. hal_ring_handle_t hal_ring_hdl,
  1470. int coalesce)
  1471. {
  1472. if (hif_system_pm_state_check(soc->hif_handle)) {
  1473. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1474. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1475. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1476. } else {
  1477. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1478. }
  1479. }
  1480. #endif
  1481. static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
  1482. {
  1483. return 0;
  1484. }
  1485. #endif
  1486. /**
  1487. * dp_tx_get_tid() - Obtain TID to be used for this frame
  1488. * @vdev: DP vdev handle
* @nbuf: skb
* @msdu_info: MSDU info structure into which the TID is filled
*
  1491. * Extract the DSCP or PCP information from frame and map into TID value.
  1492. *
  1493. * Return: void
  1494. */
  1495. static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1496. struct dp_tx_msdu_info_s *msdu_info)
  1497. {
  1498. uint8_t tos = 0, dscp_tid_override = 0;
  1499. uint8_t *hdr_ptr, *L3datap;
  1500. uint8_t is_mcast = 0;
  1501. qdf_ether_header_t *eh = NULL;
  1502. qdf_ethervlan_header_t *evh = NULL;
  1503. uint16_t ether_type;
  1504. qdf_llc_t *llcHdr;
  1505. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  1506. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1507. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1508. eh = (qdf_ether_header_t *)nbuf->data;
  1509. hdr_ptr = (uint8_t *)(eh->ether_dhost);
  1510. L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
  1511. } else {
  1512. qdf_dot3_qosframe_t *qos_wh =
  1513. (qdf_dot3_qosframe_t *) nbuf->data;
  1514. msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
  1515. qos_wh->i_qos[0] & DP_QOS_TID : 0;
  1516. return;
  1517. }
  1518. is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
  1519. ether_type = eh->ether_type;
  1520. llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
  1521. /*
  1522. * Check if packet is dot3 or eth2 type.
  1523. */
  1524. if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
  1525. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1526. sizeof(*llcHdr));
  1527. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1528. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
  1529. sizeof(*llcHdr);
  1530. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
  1531. + sizeof(*llcHdr) +
  1532. sizeof(qdf_net_vlanhdr_t));
  1533. } else {
  1534. L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
  1535. sizeof(*llcHdr);
  1536. }
  1537. } else {
  1538. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1539. evh = (qdf_ethervlan_header_t *) eh;
  1540. ether_type = evh->ether_type;
  1541. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
  1542. }
  1543. }
  1544. /*
  1545. * Find priority from IP TOS DSCP field
  1546. */
  1547. if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
  1548. qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
  1549. if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
  1550. /* Only for unicast frames */
  1551. if (!is_mcast) {
  1552. /* send it on VO queue */
  1553. msdu_info->tid = DP_VO_TID;
  1554. }
  1555. } else {
  1556. /*
  1557. * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
  1558. * from TOS byte.
  1559. */
  1560. tos = ip->ip_tos;
  1561. dscp_tid_override = 1;
  1562. }
  1563. } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
  1564. /* TODO
  1565. * use flowlabel
* igmpmld cases to be handled in phase 2
  1567. */
  1568. unsigned long ver_pri_flowlabel;
  1569. unsigned long pri;
  1570. ver_pri_flowlabel = *(unsigned long *) L3datap;
  1571. pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
  1572. DP_IPV6_PRIORITY_SHIFT;
  1573. tos = pri;
  1574. dscp_tid_override = 1;
  1575. } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
  1576. msdu_info->tid = DP_VO_TID;
  1577. else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
  1578. /* Only for unicast frames */
  1579. if (!is_mcast) {
  1580. /* send ucast arp on VO queue */
  1581. msdu_info->tid = DP_VO_TID;
  1582. }
  1583. }
  1584. /*
  1585. * Assign all MCAST packets to BE
  1586. */
  1587. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1588. if (is_mcast) {
  1589. tos = 0;
  1590. dscp_tid_override = 1;
  1591. }
  1592. }
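/* Extract the 6-bit DSCP from the TOS byte and map it through the DSCP-TID table selected by vdev->dscp_tid_map_id */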
  1593. if (dscp_tid_override == 1) {
  1594. tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  1595. msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
  1596. }
  1597. if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
  1598. msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
  1599. return;
  1600. }
  1601. /**
  1602. * dp_tx_classify_tid() - Obtain TID to be used for this frame
  1603. * @vdev: DP vdev handle
* @nbuf: skb
* @msdu_info: MSDU info structure into which the TID is filled
*
  1606. * Software based TID classification is required when more than 2 DSCP-TID
  1607. * mapping tables are needed.
  1608. * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
  1609. *
  1610. * Return: void
  1611. */
  1612. static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1613. struct dp_tx_msdu_info_s *msdu_info)
  1614. {
  1615. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1616. /*
* skip_sw_tid_classification flag will be set in the below cases:
  1618. * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
  1619. * 2. hlos_tid_override enabled for vdev
  1620. * 3. mesh mode enabled for vdev
  1621. */
  1622. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1623. /* Update tid in msdu_info from skb priority */
  1624. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1625. & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
  1626. uint32_t tid = qdf_nbuf_get_priority(nbuf);
  1627. if (tid == DP_TX_INVALID_QOS_TAG)
  1628. return;
  1629. msdu_info->tid = tid;
  1630. return;
  1631. }
  1632. return;
  1633. }
  1634. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1635. }
  1636. #ifdef FEATURE_WLAN_TDLS
  1637. /**
  1638. * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
  1639. * @soc: datapath SOC
  1640. * @vdev: datapath vdev
  1641. * @tx_desc: TX descriptor
  1642. *
  1643. * Return: None
  1644. */
  1645. static void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1646. struct dp_vdev *vdev,
  1647. struct dp_tx_desc_s *tx_desc)
  1648. {
  1649. if (vdev) {
  1650. if (vdev->is_tdls_frame) {
  1651. tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
  1652. vdev->is_tdls_frame = false;
  1653. }
  1654. }
  1655. }
  1656. static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
  1657. {
  1658. uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
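/* The HTT completion status layout differs per target generation; pick the extraction macro based on the arch id */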
  1659. switch (soc->arch_id) {
  1660. case CDP_ARCH_TYPE_LI:
  1661. tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
  1662. break;
  1663. case CDP_ARCH_TYPE_BE:
  1664. tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
  1665. break;
  1666. default:
  1667. dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
  1668. QDF_BUG(0);
  1669. }
  1670. return tx_status;
  1671. }
  1672. /**
  1673. * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
  1674. * @soc: dp_soc handle
  1675. * @tx_desc: TX descriptor
  1677. *
  1678. * Return: None
  1679. */
  1680. static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
  1681. struct dp_tx_desc_s *tx_desc)
  1682. {
  1683. uint8_t tx_status = 0;
  1684. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  1685. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1686. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1687. DP_MOD_ID_TDLS);
  1688. if (qdf_unlikely(!vdev)) {
  1689. dp_err_rl("vdev is null!");
  1690. goto error;
  1691. }
  1692. hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
  1693. tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
  1694. dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
  1695. if (vdev->tx_non_std_data_callback.func) {
  1696. qdf_nbuf_set_next(nbuf, NULL);
  1697. vdev->tx_non_std_data_callback.func(
  1698. vdev->tx_non_std_data_callback.ctxt,
  1699. nbuf, tx_status);
  1700. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1701. return;
  1702. } else {
  1703. dp_err_rl("callback func is null");
  1704. }
  1705. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1706. error:
  1707. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
  1708. qdf_nbuf_free(nbuf);
  1709. }
  1710. /**
  1711. * dp_tx_msdu_single_map() - do nbuf map
  1712. * @vdev: DP vdev handle
  1713. * @tx_desc: DP TX descriptor pointer
  1714. * @nbuf: skb pointer
  1715. *
  1716. * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
  1717. * operation done in other component.
  1718. *
  1719. * Return: QDF_STATUS
  1720. */
  1721. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1722. struct dp_tx_desc_s *tx_desc,
  1723. qdf_nbuf_t nbuf)
  1724. {
  1725. if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
  1726. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1727. nbuf,
  1728. QDF_DMA_TO_DEVICE,
  1729. nbuf->len);
  1730. else
  1731. return qdf_nbuf_map_single(vdev->osdev, nbuf,
  1732. QDF_DMA_TO_DEVICE);
  1733. }
  1734. #else
  1735. static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1736. struct dp_vdev *vdev,
  1737. struct dp_tx_desc_s *tx_desc)
  1738. {
  1739. }
  1740. static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
  1741. struct dp_tx_desc_s *tx_desc)
  1742. {
  1743. }
  1744. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1745. struct dp_tx_desc_s *tx_desc,
  1746. qdf_nbuf_t nbuf)
  1747. {
  1748. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1749. nbuf,
  1750. QDF_DMA_TO_DEVICE,
  1751. nbuf->len);
  1752. }
  1753. #endif
  1754. static inline
  1755. qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
  1756. struct dp_tx_desc_s *tx_desc,
  1757. qdf_nbuf_t nbuf)
  1758. {
  1759. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  1760. ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
  1761. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
  1762. return 0;
  1763. return qdf_nbuf_mapped_paddr_get(nbuf);
  1764. }
  1765. static inline
  1766. void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1767. {
  1768. qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
  1769. desc->nbuf,
  1770. desc->dma_addr,
  1771. QDF_DMA_TO_DEVICE,
  1772. desc->length);
  1773. }
  1774. #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
  1775. static inline bool
  1776. is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
  1777. {
  1778. struct net_device *ingress_dev;
  1779. skb_frag_t *frag;
  1780. uint16_t buf_len = 0;
  1781. uint16_t linear_data_len = 0;
  1782. uint8_t *payload_addr = NULL;
  1783. ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
  1784. if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
  1785. dev_put(ingress_dev);
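/*
 * The ingress rmnet device advertises phony headroom, so the area in
 * front of frag[0] is usable: copy the linear (headlen) bytes right
 * before the frag payload so the MSDU becomes one contiguous buffer.
 */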
  1786. frag = &(skb_shinfo(nbuf)->frags[0]);
  1787. buf_len = skb_frag_size(frag);
  1788. payload_addr = (uint8_t *)skb_frag_address(frag);
  1789. linear_data_len = skb_headlen(nbuf);
  1790. buf_len += linear_data_len;
  1791. payload_addr = payload_addr - linear_data_len;
  1792. memcpy(payload_addr, nbuf->data, linear_data_len);
  1793. msdu_info->frm_type = dp_tx_frm_rmnet;
  1794. msdu_info->buf_len = buf_len;
  1795. msdu_info->payload_addr = payload_addr;
  1796. return true;
  1797. }
  1798. dev_put(ingress_dev);
  1799. return false;
  1800. }
  1801. static inline
  1802. qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
  1803. struct dp_tx_desc_s *tx_desc)
  1804. {
  1805. qdf_dma_addr_t paddr;
  1806. paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
  1807. tx_desc->length = msdu_info->buf_len;
  1808. qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
  1809. (void *)(msdu_info->payload_addr +
  1810. msdu_info->buf_len));
  1811. tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
  1812. return paddr;
  1813. }
  1814. #else
  1815. static inline bool
  1816. is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
  1817. {
  1818. return false;
  1819. }
  1820. static inline
  1821. qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
  1822. struct dp_tx_desc_s *tx_desc)
  1823. {
  1824. return 0;
  1825. }
  1826. #endif
  1827. #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
  1828. static inline
  1829. qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
  1830. struct dp_tx_desc_s *tx_desc,
  1831. qdf_nbuf_t nbuf)
  1832. {
  1833. if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
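/*
 * Fast path: skip the DMA map API, just clean the CPU cache over the
 * payload and use the direct virt-to-phys address.
 */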
  1834. qdf_nbuf_dma_clean_range((void *)nbuf->data,
  1835. (void *)(nbuf->data + nbuf->len));
  1836. return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
  1837. } else {
  1838. return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
  1839. }
  1840. }
  1841. static inline
  1842. void dp_tx_nbuf_unmap(struct dp_soc *soc,
  1843. struct dp_tx_desc_s *desc)
  1844. {
  1845. if (qdf_unlikely(!(desc->flags &
  1846. (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
  1847. return dp_tx_nbuf_unmap_regular(soc, desc);
  1848. }
  1849. #else
  1850. static inline
  1851. qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
  1852. struct dp_tx_desc_s *tx_desc,
  1853. qdf_nbuf_t nbuf)
  1854. {
  1855. return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
  1856. }
  1857. static inline
  1858. void dp_tx_nbuf_unmap(struct dp_soc *soc,
  1859. struct dp_tx_desc_s *desc)
  1860. {
  1861. return dp_tx_nbuf_unmap_regular(soc, desc);
  1862. }
  1863. #endif
  1864. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
  1865. static inline
  1866. void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1867. {
  1868. dp_tx_nbuf_unmap(soc, desc);
  1869. desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
  1870. }
  1871. static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1872. {
  1873. if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
  1874. dp_tx_nbuf_unmap(soc, desc);
  1875. }
  1876. #else
  1877. static inline
  1878. void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1879. {
  1880. }
  1881. static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1882. {
  1883. dp_tx_nbuf_unmap(soc, desc);
  1884. }
  1885. #endif
  1886. #ifdef MESH_MODE_SUPPORT
  1887. /**
  1888. * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
  1889. * @soc: datapath SOC
  1890. * @vdev: datapath vdev
  1891. * @tx_desc: TX descriptor
  1892. *
  1893. * Return: None
  1894. */
  1895. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1896. struct dp_vdev *vdev,
  1897. struct dp_tx_desc_s *tx_desc)
  1898. {
  1899. if (qdf_unlikely(vdev->mesh_vdev))
  1900. tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
  1901. }
  1902. /**
  1903. * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
  1904. * @soc: dp_soc handle
  1905. * @tx_desc: TX descriptor
  1906. * @delayed_free: delay the nbuf free
  1907. *
  1908. * Return: nbuf to be freed late
  1909. */
  1910. static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1911. struct dp_tx_desc_s *tx_desc,
  1912. bool delayed_free)
  1913. {
  1914. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1915. struct dp_vdev *vdev = NULL;
  1916. vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
  1917. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
  1918. if (vdev)
  1919. DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
  1920. if (delayed_free)
  1921. return nbuf;
  1922. qdf_nbuf_free(nbuf);
  1923. } else {
  1924. if (vdev && vdev->osif_tx_free_ext) {
  1925. vdev->osif_tx_free_ext((nbuf));
  1926. } else {
  1927. if (delayed_free)
  1928. return nbuf;
  1929. qdf_nbuf_free(nbuf);
  1930. }
  1931. }
  1932. if (vdev)
  1933. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  1934. return NULL;
  1935. }
  1936. #else
  1937. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1938. struct dp_vdev *vdev,
  1939. struct dp_tx_desc_s *tx_desc)
  1940. {
  1941. }
  1942. static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1943. struct dp_tx_desc_s *tx_desc,
  1944. bool delayed_free)
  1945. {
  1946. return NULL;
  1947. }
  1948. #endif
  1949. /**
  1950. * dp_tx_frame_is_drop() - checks if the packet is loopback
  1951. * @vdev: DP vdev handle
* @srcmac: source MAC address
* @dstmac: destination MAC address
  1953. *
  1954. * Return: 1 if frame needs to be dropped else 0
  1955. */
  1956. int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
  1957. {
  1958. struct dp_pdev *pdev = NULL;
  1959. struct dp_ast_entry *src_ast_entry = NULL;
  1960. struct dp_ast_entry *dst_ast_entry = NULL;
  1961. struct dp_soc *soc = NULL;
  1962. qdf_assert(vdev);
  1963. pdev = vdev->pdev;
  1964. qdf_assert(pdev);
  1965. soc = pdev->soc;
  1966. dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1967. (soc, dstmac, vdev->pdev->pdev_id);
  1968. src_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1969. (soc, srcmac, vdev->pdev->pdev_id);
  1970. if (dst_ast_entry && src_ast_entry) {
  1971. if (dst_ast_entry->peer_id ==
  1972. src_ast_entry->peer_id)
  1973. return 1;
  1974. }
  1975. return 0;
  1976. }
  1977. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
  1978. defined(WLAN_MCAST_MLO)
  1979. /* MLO peer id for reinject*/
  1980. #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
  1981. /* MLO vdev id inc offset */
  1982. #define DP_MLO_VDEV_ID_OFFSET 0x80
  1983. static inline void
  1984. dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  1985. {
  1986. if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
  1987. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1988. qdf_atomic_inc(&soc->num_tx_exception);
  1989. }
  1990. }
  1991. static inline void
  1992. dp_tx_update_mcast_param(uint16_t peer_id,
  1993. uint16_t *htt_tcl_metadata,
  1994. struct dp_vdev *vdev,
  1995. struct dp_tx_msdu_info_s *msdu_info)
  1996. {
  1997. if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
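/*
 * Reinjected MLO multicast: switch the TCL metadata to the
 * global-sequence-number based type, carry the GSN from msdu_info and
 * move vdev_id into the MLO offset range.
 */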
  1998. *htt_tcl_metadata = 0;
  1999. DP_TX_TCL_METADATA_TYPE_SET(
  2000. *htt_tcl_metadata,
  2001. HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
  2002. HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
  2003. msdu_info->gsn);
  2004. msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
  2005. if (qdf_unlikely(vdev->nawds_enabled))
  2006. HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
  2007. *htt_tcl_metadata, 1);
  2008. } else {
  2009. msdu_info->vdev_id = vdev->vdev_id;
  2010. }
  2011. }
  2012. #else
  2013. static inline void
  2014. dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  2015. {
  2016. }
  2017. static inline void
  2018. dp_tx_update_mcast_param(uint16_t peer_id,
  2019. uint16_t *htt_tcl_metadata,
  2020. struct dp_vdev *vdev,
  2021. struct dp_tx_msdu_info_s *msdu_info)
  2022. {
  2023. }
  2024. #endif
  2025. #ifdef DP_TX_SW_DROP_STATS_INC
  2026. static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
  2027. qdf_nbuf_t nbuf,
  2028. enum cdp_tx_sw_drop drop_code)
  2029. {
  2030. /* EAPOL Drop stats */
  2031. if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
  2032. switch (drop_code) {
  2033. case TX_DESC_ERR:
  2034. DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
  2035. break;
  2036. case TX_HAL_RING_ACCESS_ERR:
  2037. DP_STATS_INC(pdev,
  2038. eap_drop_stats.tx_hal_ring_access_err, 1);
  2039. break;
  2040. case TX_DMA_MAP_ERR:
  2041. DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
  2042. break;
  2043. case TX_HW_ENQUEUE:
  2044. DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
  2045. break;
  2046. case TX_SW_ENQUEUE:
  2047. DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
  2048. break;
  2049. default:
  2050. dp_info_rl("Invalid eapol_drop code: %d", drop_code);
  2051. break;
  2052. }
  2053. }
  2054. }
  2055. #else
  2056. static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
  2057. qdf_nbuf_t nbuf,
  2058. enum cdp_tx_sw_drop drop_code)
  2059. {
  2060. }
  2061. #endif
  2062. /**
  2063. * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
  2064. * @vdev: DP vdev handle
  2065. * @nbuf: skb
* @msdu_info: MSDU info carrying the TID, the Tx queue and metadata for the fw
  2069. * @peer_id: peer_id of the peer in case of NAWDS frames
  2070. * @tx_exc_metadata: Handle that holds exception path metadata
  2071. *
  2072. * Return: NULL on success,
  2073. * nbuf when it fails to send
  2074. */
  2075. qdf_nbuf_t
  2076. dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2077. struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
  2078. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2079. {
  2080. struct dp_pdev *pdev = vdev->pdev;
  2081. struct dp_soc *soc = pdev->soc;
  2082. struct dp_tx_desc_s *tx_desc;
  2083. QDF_STATUS status;
  2084. struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
  2085. uint16_t htt_tcl_metadata = 0;
  2086. enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
  2087. uint8_t tid = msdu_info->tid;
  2088. struct cdp_tid_tx_stats *tid_stats = NULL;
  2089. qdf_dma_addr_t paddr;
  2090. /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
  2091. tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
  2092. msdu_info, tx_exc_metadata);
  2093. if (!tx_desc) {
  2094. dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
  2095. vdev->vdev_id, vdev, tx_q->desc_pool_id);
  2096. drop_code = TX_DESC_ERR;
  2097. goto fail_return;
  2098. }
  2099. dp_tx_update_tdls_flags(soc, vdev, tx_desc);
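/*
 * Pick the HTT TCL metadata: host-inspected vdev metadata for an
 * invalid peer id, peer-id based metadata for NAWDS/exception peers,
 * and the plain vdev metadata otherwise.
 */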
  2100. if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
  2101. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2102. DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
  2103. } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
  2104. DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
  2105. DP_TCL_METADATA_TYPE_PEER_BASED);
  2106. DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
  2107. peer_id);
  2108. dp_tx_bypass_reinjection(soc, tx_desc);
  2109. } else
  2110. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2111. if (msdu_info->exception_fw)
  2112. DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  2113. dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
  2114. !pdev->enhanced_stats_en);
  2115. dp_tx_update_mesh_flags(soc, vdev, tx_desc);
  2116. if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
  2117. paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
  2118. else
  2119. paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf);
  2120. if (!paddr) {
  2121. /* Handle failure */
  2122. dp_err("qdf_nbuf_map failed");
  2123. DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
  2124. drop_code = TX_DMA_MAP_ERR;
  2125. goto release_desc;
  2126. }
  2127. tx_desc->dma_addr = paddr;
  2128. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  2129. tx_desc->id, DP_TX_DESC_MAP);
  2130. dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
  2131. /* Enqueue the Tx MSDU descriptor to HW for transmit */
  2132. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  2133. htt_tcl_metadata,
  2134. tx_exc_metadata, msdu_info);
  2135. if (status != QDF_STATUS_SUCCESS) {
  2136. dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2137. tx_desc, tx_q->ring_id);
  2138. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  2139. tx_desc->id, DP_TX_DESC_UNMAP);
  2140. dp_tx_nbuf_unmap(soc, tx_desc);
  2141. drop_code = TX_HW_ENQUEUE;
  2142. goto release_desc;
  2143. }
  2144. tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
  2145. return NULL;
  2146. release_desc:
  2147. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2148. fail_return:
  2149. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2150. tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
  2151. tid_stats = &pdev->stats.tid_stats.
  2152. tid_tx_stats[tx_q->ring_id][tid];
  2153. tid_stats->swdrop_cnt[drop_code]++;
  2154. return nbuf;
  2155. }
  2156. /**
  2157. * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
  2158. * @soc: Soc handle
  2159. * @desc: software Tx descriptor to be processed
  2160. * @delayed_free: defer freeing of nbuf
  2161. *
  2162. * Return: nbuf to be freed later
  2163. */
  2164. qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
  2165. bool delayed_free)
  2166. {
  2167. qdf_nbuf_t nbuf = desc->nbuf;
  2168. enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
  2169. /* nbuf already freed in vdev detach path */
  2170. if (!nbuf)
  2171. return NULL;
  2172. /* If it is TDLS mgmt, don't unmap or free the frame */
  2173. if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
  2174. dp_non_std_htt_tx_comp_free_buff(soc, desc);
  2175. return NULL;
  2176. }
  2177. /* 0 : MSDU buffer, 1 : MLE */
  2178. if (desc->msdu_ext_desc) {
  2179. /* TSO free */
  2180. if (hal_tx_ext_desc_get_tso_enable(
  2181. desc->msdu_ext_desc->vaddr)) {
  2182. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  2183. desc->id, DP_TX_COMP_MSDU_EXT);
  2184. dp_tx_tso_seg_history_add(soc,
  2185. desc->msdu_ext_desc->tso_desc,
  2186. desc->nbuf, desc->id, type);
/* unmap each TSO seg before freeing the nbuf */
  2188. dp_tx_tso_unmap_segment(soc,
  2189. desc->msdu_ext_desc->tso_desc,
  2190. desc->msdu_ext_desc->
  2191. tso_num_desc);
  2192. goto nbuf_free;
  2193. }
  2194. if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
  2195. void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
  2196. qdf_dma_addr_t iova;
  2197. uint32_t frag_len;
  2198. uint32_t i;
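/*
 * SG frame: unmap the linear portion first, then walk the fragments
 * recorded in the extension descriptor until an empty entry is found.
 */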
  2199. qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
  2200. QDF_DMA_TO_DEVICE,
  2201. qdf_nbuf_headlen(nbuf));
  2202. for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
  2203. hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
  2204. &iova,
  2205. &frag_len);
  2206. if (!iova || !frag_len)
  2207. break;
  2208. qdf_mem_unmap_page(soc->osdev, iova, frag_len,
  2209. QDF_DMA_TO_DEVICE);
  2210. }
  2211. goto nbuf_free;
  2212. }
  2213. }
/* If it's an ME frame, don't unmap the cloned nbufs */
  2215. if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
  2216. goto nbuf_free;
  2217. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
  2218. dp_tx_unmap(soc, desc);
  2219. if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
  2220. return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
  2221. if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
  2222. return NULL;
  2223. nbuf_free:
  2224. if (delayed_free)
  2225. return nbuf;
  2226. qdf_nbuf_free(nbuf);
  2227. return NULL;
  2228. }
  2229. /**
  2230. * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
  2231. * @soc: DP soc handle
  2232. * @nbuf: skb
  2233. * @msdu_info: MSDU info
  2234. *
  2235. * Return: None
  2236. */
  2237. static inline void
  2238. dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2239. struct dp_tx_msdu_info_s *msdu_info)
  2240. {
  2241. uint32_t cur_idx;
  2242. struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
  2243. qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
  2244. qdf_nbuf_headlen(nbuf));
  2245. for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
  2246. qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
  2247. (seg->frags[cur_idx].paddr_lo | ((uint64_t)
  2248. seg->frags[cur_idx].paddr_hi) << 32),
  2249. seg->frags[cur_idx].len,
  2250. QDF_DMA_TO_DEVICE);
  2251. }
  2252. /**
  2253. * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
  2254. * @vdev: DP vdev handle
  2255. * @nbuf: skb
  2256. * @msdu_info: MSDU info to be setup in MSDU extension descriptor
  2257. *
  2258. * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
  2259. *
  2260. * Return: NULL on success,
  2261. * nbuf when it fails to send
  2262. */
  2263. #if QDF_LOCK_STATS
  2264. noinline
  2265. #else
  2266. #endif
  2267. qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2268. struct dp_tx_msdu_info_s *msdu_info)
  2269. {
  2270. uint32_t i;
  2271. struct dp_pdev *pdev = vdev->pdev;
  2272. struct dp_soc *soc = pdev->soc;
  2273. struct dp_tx_desc_s *tx_desc;
  2274. bool is_cce_classified = false;
  2275. QDF_STATUS status;
  2276. uint16_t htt_tcl_metadata = 0;
  2277. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  2278. struct cdp_tid_tx_stats *tid_stats = NULL;
  2279. uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
  2280. if (msdu_info->frm_type == dp_tx_frm_me)
  2281. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2282. i = 0;
  2283. /* Print statement to track i and num_seg */
  2284. /*
* For each segment (maps to 1 MSDU), prepare software and hardware
  2286. * descriptors using information in msdu_info
  2287. */
  2288. while (i < msdu_info->num_seg) {
  2289. /*
  2290. * Setup Tx descriptor for an MSDU, and MSDU extension
  2291. * descriptor
  2292. */
  2293. tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
  2294. tx_q->desc_pool_id);
  2295. if (!tx_desc) {
  2296. if (msdu_info->frm_type == dp_tx_frm_me) {
  2297. prep_desc_fail++;
  2298. dp_tx_me_free_buf(pdev,
  2299. (void *)(msdu_info->u.sg_info
  2300. .curr_seg->frags[0].vaddr));
  2301. if (prep_desc_fail == msdu_info->num_seg) {
  2302. /*
  2303. * Unmap is needed only if descriptor
  2304. * preparation failed for all segments.
  2305. */
  2306. qdf_nbuf_unmap(soc->osdev,
  2307. msdu_info->u.sg_info.
  2308. curr_seg->nbuf,
  2309. QDF_DMA_TO_DEVICE);
  2310. }
  2311. /*
  2312. * Free the nbuf for the current segment
  2313. * and make it point to the next in the list.
* For ME, there are as many segments as there
* are number of clients.
  2316. */
  2317. qdf_nbuf_free(msdu_info->u.sg_info
  2318. .curr_seg->nbuf);
  2319. if (msdu_info->u.sg_info.curr_seg->next) {
  2320. msdu_info->u.sg_info.curr_seg =
  2321. msdu_info->u.sg_info
  2322. .curr_seg->next;
  2323. nbuf = msdu_info->u.sg_info
  2324. .curr_seg->nbuf;
  2325. }
  2326. i++;
  2327. continue;
  2328. }
  2329. if (msdu_info->frm_type == dp_tx_frm_tso) {
  2330. dp_tx_tso_seg_history_add(
  2331. soc,
  2332. msdu_info->u.tso_info.curr_seg,
  2333. nbuf, 0, DP_TX_DESC_UNMAP);
  2334. dp_tx_tso_unmap_segment(soc,
  2335. msdu_info->u.tso_info.
  2336. curr_seg,
  2337. msdu_info->u.tso_info.
  2338. tso_num_seg_list);
  2339. if (msdu_info->u.tso_info.curr_seg->next) {
  2340. msdu_info->u.tso_info.curr_seg =
  2341. msdu_info->u.tso_info.curr_seg->next;
  2342. i++;
  2343. continue;
  2344. }
  2345. }
  2346. if (msdu_info->frm_type == dp_tx_frm_sg)
  2347. dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
  2348. goto done;
  2349. }
  2350. if (msdu_info->frm_type == dp_tx_frm_me) {
  2351. tx_desc->msdu_ext_desc->me_buffer =
  2352. (struct dp_tx_me_buf_t *)msdu_info->
  2353. u.sg_info.curr_seg->frags[0].vaddr;
  2354. tx_desc->flags |= DP_TX_DESC_FLAG_ME;
  2355. }
  2356. if (is_cce_classified)
  2357. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  2358. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2359. if (msdu_info->exception_fw) {
  2360. DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  2361. }
  2362. dp_tx_is_hp_update_required(i, msdu_info);
  2363. /*
  2364. * For frames with multiple segments (TSO, ME), jump to next
  2365. * segment.
  2366. */
  2367. if (msdu_info->frm_type == dp_tx_frm_tso) {
  2368. if (msdu_info->u.tso_info.curr_seg->next) {
  2369. msdu_info->u.tso_info.curr_seg =
  2370. msdu_info->u.tso_info.curr_seg->next;
  2371. /*
  2372. * If this is a jumbo nbuf, then increment the
  2373. * number of nbuf users for each additional
  2374. * segment of the msdu. This will ensure that
  2375. * the skb is freed only after receiving tx
  2376. * completion for all segments of an nbuf
  2377. */
  2378. qdf_nbuf_inc_users(nbuf);
  2379. /* Check with MCL if this is needed */
  2380. /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
  2381. */
  2382. }
  2383. }
  2384. dp_tx_update_mcast_param(DP_INVALID_PEER,
  2385. &htt_tcl_metadata,
  2386. vdev,
  2387. msdu_info);
  2388. /*
  2389. * Enqueue the Tx MSDU descriptor to HW for transmit
  2390. */
  2391. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  2392. htt_tcl_metadata,
  2393. NULL, msdu_info);
  2394. dp_tx_check_and_flush_hp(soc, status, msdu_info);
  2395. if (status != QDF_STATUS_SUCCESS) {
  2396. dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2397. tx_desc, tx_q->ring_id);
  2398. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2399. tid_stats = &pdev->stats.tid_stats.
  2400. tid_tx_stats[tx_q->ring_id][msdu_info->tid];
  2401. tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
  2402. if (msdu_info->frm_type == dp_tx_frm_me) {
  2403. hw_enq_fail++;
  2404. if (hw_enq_fail == msdu_info->num_seg) {
  2405. /*
  2406. * Unmap is needed only if enqueue
  2407. * failed for all segments.
  2408. */
  2409. qdf_nbuf_unmap(soc->osdev,
  2410. msdu_info->u.sg_info.
  2411. curr_seg->nbuf,
  2412. QDF_DMA_TO_DEVICE);
  2413. }
  2414. /*
  2415. * Free the nbuf for the current segment
  2416. * and make it point to the next in the list.
* For ME, there are as many segments as there
* are number of clients.
  2419. */
  2420. qdf_nbuf_free(msdu_info->u.sg_info
  2421. .curr_seg->nbuf);
  2422. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2423. if (msdu_info->u.sg_info.curr_seg->next) {
  2424. msdu_info->u.sg_info.curr_seg =
  2425. msdu_info->u.sg_info
  2426. .curr_seg->next;
  2427. nbuf = msdu_info->u.sg_info
  2428. .curr_seg->nbuf;
  2429. } else
  2430. break;
  2431. i++;
  2432. continue;
  2433. }
  2434. /*
  2435. * For TSO frames, the nbuf users increment done for
  2436. * the current segment has to be reverted, since the
  2437. * hw enqueue for this segment failed
  2438. */
  2439. if (msdu_info->frm_type == dp_tx_frm_tso &&
  2440. msdu_info->u.tso_info.curr_seg) {
  2441. /*
  2442. * unmap and free current,
  2443. * retransmit remaining segments
  2444. */
  2445. dp_tx_comp_free_buf(soc, tx_desc, false);
  2446. i++;
  2447. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2448. continue;
  2449. }
  2450. if (msdu_info->frm_type == dp_tx_frm_sg)
  2451. dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
  2452. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2453. goto done;
  2454. }
  2455. /*
  2456. * TODO
  2457. * if tso_info structure can be modified to have curr_seg
  2458. * as first element, following 2 blocks of code (for TSO and SG)
  2459. * can be combined into 1
  2460. */
  2461. /*
  2462. * For Multicast-Unicast converted packets,
  2463. * each converted frame (for a client) is represented as
  2464. * 1 segment
  2465. */
  2466. if ((msdu_info->frm_type == dp_tx_frm_sg) ||
  2467. (msdu_info->frm_type == dp_tx_frm_me)) {
  2468. if (msdu_info->u.sg_info.curr_seg->next) {
  2469. msdu_info->u.sg_info.curr_seg =
  2470. msdu_info->u.sg_info.curr_seg->next;
  2471. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2472. } else
  2473. break;
  2474. }
  2475. i++;
  2476. }
  2477. nbuf = NULL;
  2478. done:
  2479. return nbuf;
  2480. }
  2481. /**
  2482. * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
  2483. * for SG frames
  2484. * @vdev: DP vdev handle
  2485. * @nbuf: skb
  2486. * @seg_info: Pointer to Segment info Descriptor to be prepared
  2487. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2488. *
2489. * Return: nbuf on success,
2490. * NULL on failure
  2491. */
  2492. static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2493. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  2494. {
  2495. uint32_t cur_frag, nr_frags, i;
  2496. qdf_dma_addr_t paddr;
  2497. struct dp_tx_sg_info_s *sg_info;
  2498. sg_info = &msdu_info->u.sg_info;
  2499. nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2500. if (QDF_STATUS_SUCCESS !=
  2501. qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
  2502. QDF_DMA_TO_DEVICE,
  2503. qdf_nbuf_headlen(nbuf))) {
  2504. dp_tx_err("dma map error");
  2505. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2506. qdf_nbuf_free(nbuf);
  2507. return NULL;
  2508. }
  2509. paddr = qdf_nbuf_mapped_paddr_get(nbuf);
  2510. seg_info->frags[0].paddr_lo = paddr;
  2511. seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
  2512. seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
  2513. seg_info->frags[0].vaddr = (void *) nbuf;
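/*
 * frags[0] describes the linear (head) portion of the skb; the page
 * fragments are filled in at index cur_frag + 1 by the loop below.
 * The DMA address is split into two 32-bit words, e.g. (illustrative
 * value) paddr 0x1_2345_6780 is stored as paddr_lo = 0x23456780 and
 * paddr_hi = 0x1.
 */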
  2514. for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
  2515. if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
  2516. nbuf, 0,
  2517. QDF_DMA_TO_DEVICE,
  2518. cur_frag)) {
  2519. dp_tx_err("frag dma map error");
  2520. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2521. goto map_err;
  2522. }
  2523. paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
  2524. seg_info->frags[cur_frag + 1].paddr_lo = paddr;
  2525. seg_info->frags[cur_frag + 1].paddr_hi =
  2526. ((uint64_t) paddr) >> 32;
  2527. seg_info->frags[cur_frag + 1].len =
  2528. qdf_nbuf_get_frag_size(nbuf, cur_frag);
  2529. }
  2530. seg_info->frag_cnt = (cur_frag + 1);
  2531. seg_info->total_len = qdf_nbuf_len(nbuf);
  2532. seg_info->next = NULL;
  2533. sg_info->curr_seg = seg_info;
  2534. msdu_info->frm_type = dp_tx_frm_sg;
  2535. msdu_info->num_seg = 1;
  2536. return nbuf;
  2537. map_err:
  2538. /* restore paddr into nbuf before calling unmap */
  2539. qdf_nbuf_mapped_paddr_set(nbuf,
  2540. (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
  2541. ((uint64_t)
  2542. seg_info->frags[0].paddr_hi) << 32));
  2543. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  2544. QDF_DMA_TO_DEVICE,
  2545. seg_info->frags[0].len);
  2546. for (i = 1; i <= cur_frag; i++) {
  2547. qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
  2548. (seg_info->frags[i].paddr_lo | ((uint64_t)
  2549. seg_info->frags[i].paddr_hi) << 32),
  2550. seg_info->frags[i].len,
  2551. QDF_DMA_TO_DEVICE);
  2552. }
  2553. qdf_nbuf_free(nbuf);
  2554. return NULL;
  2555. }
  2556. /**
  2557. * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
  2558. * @vdev: DP vdev handle
  2559. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2560. * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
  2561. *
2562. * Return: none
  2564. */
  2565. static
  2566. void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
  2567. struct dp_tx_msdu_info_s *msdu_info,
  2568. uint16_t ppdu_cookie)
  2569. {
  2570. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2571. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2572. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2573. HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
  2574. (msdu_info->meta_data[5], 1);
  2575. HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
  2576. (msdu_info->meta_data[5], 1);
  2577. HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
  2578. (msdu_info->meta_data[6], ppdu_cookie);
  2579. msdu_info->exception_fw = 1;
  2580. msdu_info->is_tx_sniffer = 1;
  2581. }
  2582. #ifdef MESH_MODE_SUPPORT
  2583. /**
  2584. * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
2585. * and prepare msdu_info for mesh frames.
  2586. * @vdev: DP vdev handle
  2587. * @nbuf: skb
  2588. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2589. *
  2590. * Return: NULL on failure,
  2591. * nbuf when extracted successfully
  2592. */
  2593. static
  2594. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2595. struct dp_tx_msdu_info_s *msdu_info)
  2596. {
  2597. struct meta_hdr_s *mhdr;
  2598. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2599. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2600. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2601. if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
  2602. msdu_info->exception_fw = 0;
  2603. goto remove_meta_hdr;
  2604. }
  2605. msdu_info->exception_fw = 1;
  2606. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2607. meta_data->host_tx_desc_pool = 1;
  2608. meta_data->update_peer_cache = 1;
  2609. meta_data->learning_frame = 1;
  2610. if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
  2611. meta_data->power = mhdr->power;
  2612. meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
  2613. meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
  2614. meta_data->pream_type = mhdr->rate_info[0].preamble_type;
  2615. meta_data->retry_limit = mhdr->rate_info[0].max_tries;
  2616. meta_data->dyn_bw = 1;
  2617. meta_data->valid_pwr = 1;
  2618. meta_data->valid_mcs_mask = 1;
  2619. meta_data->valid_nss_mask = 1;
  2620. meta_data->valid_preamble_type = 1;
  2621. meta_data->valid_retries = 1;
  2622. meta_data->valid_bw_info = 1;
  2623. }
  2624. if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
  2625. meta_data->encrypt_type = 0;
  2626. meta_data->valid_encrypt_type = 1;
  2627. meta_data->learning_frame = 0;
  2628. }
  2629. meta_data->valid_key_flags = 1;
  2630. meta_data->key_flags = (mhdr->keyix & 0x3);
  2631. remove_meta_hdr:
  2632. if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2633. dp_tx_err("qdf_nbuf_pull_head failed");
  2634. qdf_nbuf_free(nbuf);
  2635. return NULL;
  2636. }
  2637. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  2638. dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
  2639. " tid %d to_fw %d",
  2640. msdu_info->meta_data[0],
  2641. msdu_info->meta_data[1],
  2642. msdu_info->meta_data[2],
  2643. msdu_info->meta_data[3],
  2644. msdu_info->meta_data[4],
  2645. msdu_info->meta_data[5],
  2646. msdu_info->tid, msdu_info->exception_fw);
  2647. return nbuf;
  2648. }
  2649. #else
  2650. static
  2651. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2652. struct dp_tx_msdu_info_s *msdu_info)
  2653. {
  2654. return nbuf;
  2655. }
  2656. #endif
  2657. /**
  2658. * dp_check_exc_metadata() - Checks if parameters are valid
2659. * @tx_exc: holds all exception path parameters
2660. *
2661. * Return: true when all the parameters are valid, else false
  2662. *
  2663. */
  2664. static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
  2665. {
  2666. bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
  2667. HTT_INVALID_TID);
  2668. bool invalid_encap_type =
  2669. (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
  2670. tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
  2671. bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
  2672. tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
  2673. bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
  2674. tx_exc->ppdu_cookie == 0);
  2675. if (tx_exc->is_intrabss_fwd)
  2676. return true;
  2677. if (invalid_tid || invalid_encap_type || invalid_sec_type ||
  2678. invalid_cookie) {
  2679. return false;
  2680. }
  2681. return true;
  2682. }
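/*
 * Illustrative caller sketch for the exception path (hypothetical values,
 * not taken from this file); the fields shown are the ones validated by
 * dp_check_exc_metadata() above:
 *
 *   struct cdp_tx_exception_metadata meta = {0};
 *
 *   meta.tid = HTT_INVALID_TID;                     // no TID override
 *   meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE; // don't-care value
 *   meta.sec_type = CDP_INVALID_SEC_TYPE;           // don't-care value
 *   meta.peer_id = CDP_INVALID_PEER;                // no peer-based metadata
 *   nbuf = dp_tx_send_exception(soc_hdl, vdev_id, nbuf, &meta);
 *
 * Such "invalid/don't care" values pass the checks above; only out-of-range
 * tid/encap/sec values, or a Tx-sniffer frame without a ppdu_cookie, are
 * rejected.
 */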
  2683. #ifdef ATH_SUPPORT_IQUE
  2684. /**
  2685. * dp_tx_mcast_enhance() - Multicast enhancement on TX
  2686. * @vdev: vdev handle
  2687. * @nbuf: skb
  2688. *
2689. * Return: true if the frame should continue on the regular Tx path,
2690. * false if it was consumed by the ME/IGMP ME conversion path
  2691. */
  2692. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2693. {
  2694. qdf_ether_header_t *eh;
2695. /* Mcast to Ucast Conversion */
  2696. if (qdf_likely(!vdev->mcast_enhancement_en))
  2697. return true;
  2698. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2699. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
  2700. !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
  2701. dp_verbose_debug("Mcast frm for ME %pK", vdev);
  2702. qdf_nbuf_set_next(nbuf, NULL);
  2703. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
  2704. qdf_nbuf_len(nbuf));
  2705. if (dp_tx_prepare_send_me(vdev, nbuf) ==
  2706. QDF_STATUS_SUCCESS) {
  2707. return false;
  2708. }
  2709. if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
  2710. if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
  2711. QDF_STATUS_SUCCESS) {
  2712. return false;
  2713. }
  2714. }
  2715. }
  2716. return true;
  2717. }
  2718. #else
  2719. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2720. {
  2721. return true;
  2722. }
  2723. #endif
  2724. #ifdef QCA_SUPPORT_WDS_EXTENDED
  2725. /**
  2726. * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
  2727. * @vdev: vdev handle
  2728. * @nbuf: skb
  2729. *
  2730. * Return: true if frame is dropped, false otherwise
  2731. */
  2732. static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2733. {
  2734. /* Drop tx mcast and WDS Extended feature check */
  2735. if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
  2736. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  2737. qdf_nbuf_data(nbuf);
  2738. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
  2739. DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1);
  2740. return true;
  2741. }
  2742. }
  2743. return false;
  2744. }
  2745. #else
  2746. static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2747. {
  2748. return false;
  2749. }
  2750. #endif
  2751. /**
  2752. * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
  2753. * @nbuf: qdf_nbuf_t
  2754. * @vdev: struct dp_vdev *
  2755. *
  2756. * Allow packet for processing only if it is for peer client which is
  2757. * connected with same vap. Drop packet if client is connected to
  2758. * different vap.
  2759. *
  2760. * Return: QDF_STATUS
  2761. */
  2762. static inline QDF_STATUS
  2763. dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  2764. {
  2765. struct dp_ast_entry *dst_ast_entry = NULL;
  2766. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2767. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
  2768. DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
  2769. return QDF_STATUS_SUCCESS;
  2770. qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
  2771. dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
  2772. eh->ether_dhost,
  2773. vdev->vdev_id);
  2774. /* If there is no ast entry, return failure */
  2775. if (qdf_unlikely(!dst_ast_entry)) {
  2776. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2777. return QDF_STATUS_E_FAILURE;
  2778. }
  2779. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2780. return QDF_STATUS_SUCCESS;
  2781. }
  2782. /**
  2783. * dp_tx_nawds_handler() - NAWDS handler
  2784. *
  2785. * @soc: DP soc handle
2786. * @vdev: DP vdev handle
2787. * @msdu_info: msdu_info required to create HTT metadata
2788. * @nbuf: skb
 * @sa_peer_id: peer id of the source (originating) peer
  2789. *
2790. * This API transmits a copy of the multicast frame, tagged with the
2791. * corresponding peer id, to each NAWDS-enabled peer.
2792. * Return: none
  2793. */
  2794. static inline
  2795. void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
  2796. struct dp_tx_msdu_info_s *msdu_info,
  2797. qdf_nbuf_t nbuf, uint16_t sa_peer_id)
  2798. {
  2799. struct dp_peer *peer = NULL;
  2800. qdf_nbuf_t nbuf_clone = NULL;
  2801. uint16_t peer_id = DP_INVALID_PEER;
  2802. struct dp_txrx_peer *txrx_peer;
2803. /* This check avoids forwarding packets for a peer that is
2804. * present in the AST table but does not yet have a valid peer id.
2805. */
  2806. if (sa_peer_id == HTT_INVALID_PEER)
  2807. return;
  2808. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2809. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2810. txrx_peer = dp_get_txrx_peer(peer);
  2811. if (!txrx_peer)
  2812. continue;
  2813. if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
  2814. peer_id = peer->peer_id;
  2815. if (!dp_peer_is_primary_link_peer(peer))
  2816. continue;
2817. /* Multicast packets need to be
2818. * dropped in case of intra-BSS forwarding
2819. */
  2820. if (sa_peer_id == txrx_peer->peer_id) {
  2821. dp_tx_debug("multicast packet");
  2822. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  2823. tx.nawds_mcast_drop,
  2824. 1);
  2825. continue;
  2826. }
  2827. nbuf_clone = qdf_nbuf_clone(nbuf);
  2828. if (!nbuf_clone) {
  2829. QDF_TRACE(QDF_MODULE_ID_DP,
  2830. QDF_TRACE_LEVEL_ERROR,
  2831. FL("nbuf clone failed"));
  2832. break;
  2833. }
  2834. nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
  2835. msdu_info, peer_id,
  2836. NULL);
  2837. if (nbuf_clone) {
  2838. dp_tx_debug("pkt send failed");
  2839. qdf_nbuf_free(nbuf_clone);
  2840. } else {
  2841. if (peer_id != DP_INVALID_PEER)
  2842. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  2843. tx.nawds_mcast,
  2844. 1, qdf_nbuf_len(nbuf));
  2845. }
  2846. }
  2847. }
  2848. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2849. }
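/*
 * In short: for every NAWDS-enabled, non-BSS (primary link) peer on this
 * vdev, except the peer the frame originated from (sa_peer_id), the
 * multicast frame is cloned and sent as a peer-id-tagged unicast MSDU.
 */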
  2850. /**
  2851. * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
  2852. * @soc: DP soc handle
  2853. * @vdev_id: id of DP vdev handle
  2854. * @nbuf: skb
  2855. * @tx_exc_metadata: Handle that holds exception path meta data
  2856. *
  2857. * Entry point for Core Tx layer (DP_TX) invoked from
  2858. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2859. *
  2860. * Return: NULL on success,
  2861. * nbuf when it fails to send
  2862. */
  2863. qdf_nbuf_t
  2864. dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2865. qdf_nbuf_t nbuf,
  2866. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2867. {
  2868. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2869. qdf_ether_header_t *eh = NULL;
  2870. struct dp_tx_msdu_info_s msdu_info;
  2871. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2872. DP_MOD_ID_TX_EXCEPTION);
  2873. if (qdf_unlikely(!vdev))
  2874. goto fail;
  2875. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2876. if (!tx_exc_metadata)
  2877. goto fail;
  2878. msdu_info.tid = tx_exc_metadata->tid;
  2879. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2880. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2881. QDF_MAC_ADDR_REF(nbuf->data));
  2882. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2883. if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
  2884. dp_tx_err("Invalid parameters in exception path");
  2885. goto fail;
  2886. }
  2887. /* for peer based metadata check if peer is valid */
  2888. if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
  2889. struct dp_peer *peer = NULL;
  2890. peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
  2891. tx_exc_metadata->peer_id,
  2892. DP_MOD_ID_TX_EXCEPTION);
  2893. if (qdf_unlikely(!peer)) {
  2894. DP_STATS_INC(vdev,
  2895. tx_i.dropped.invalid_peer_id_in_exc_path,
  2896. 1);
  2897. goto fail;
  2898. }
  2899. dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
  2900. }
  2901. /* Basic sanity checks for unsupported packets */
  2902. /* MESH mode */
  2903. if (qdf_unlikely(vdev->mesh_vdev)) {
  2904. dp_tx_err("Mesh mode is not supported in exception path");
  2905. goto fail;
  2906. }
  2907. /*
  2908. * Classify the frame and call corresponding
  2909. * "prepare" function which extracts the segment (TSO)
  2910. * and fragmentation information (for TSO , SG, ME, or Raw)
  2911. * into MSDU_INFO structure which is later used to fill
  2912. * SW and HW descriptors.
  2913. */
  2914. if (qdf_nbuf_is_tso(nbuf)) {
  2915. dp_verbose_debug("TSO frame %pK", vdev);
  2916. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2917. qdf_nbuf_len(nbuf));
  2918. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2919. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2920. qdf_nbuf_len(nbuf));
  2921. goto fail;
  2922. }
  2923. goto send_multiple;
  2924. }
  2925. /* SG */
  2926. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2927. struct dp_tx_seg_info_s seg_info = {0};
  2928. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2929. if (!nbuf)
  2930. goto fail;
  2931. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2932. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2933. qdf_nbuf_len(nbuf));
  2934. goto send_multiple;
  2935. }
  2936. if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
  2937. DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
  2938. qdf_nbuf_len(nbuf));
  2939. dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
  2940. tx_exc_metadata->ppdu_cookie);
  2941. }
  2942. /*
  2943. * Get HW Queue to use for this frame.
2944. * TCL supports up to 4 DMA rings, out of which 3 rings are
  2945. * dedicated for data and 1 for command.
  2946. * "queue_id" maps to one hardware ring.
  2947. * With each ring, we also associate a unique Tx descriptor pool
  2948. * to minimize lock contention for these resources.
  2949. */
  2950. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2951. if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
  2952. if (qdf_unlikely(vdev->nawds_enabled)) {
  2953. /*
  2954. * This is a multicast packet
  2955. */
  2956. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  2957. tx_exc_metadata->peer_id);
  2958. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  2959. 1, qdf_nbuf_len(nbuf));
  2960. }
  2961. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2962. DP_INVALID_PEER, NULL);
  2963. } else {
  2964. /*
  2965. * Check exception descriptors
  2966. */
  2967. if (dp_tx_exception_limit_check(vdev))
  2968. goto fail;
  2969. /* Single linear frame */
  2970. /*
  2971. * If nbuf is a simple linear frame, use send_single function to
  2972. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2973. * SRNG. There is no need to setup a MSDU extension descriptor.
  2974. */
  2975. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2976. tx_exc_metadata->peer_id,
  2977. tx_exc_metadata);
  2978. }
  2979. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2980. return nbuf;
  2981. send_multiple:
  2982. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2983. fail:
  2984. if (vdev)
  2985. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2986. dp_verbose_debug("pkt send failed");
  2987. return nbuf;
  2988. }
  2989. /**
  2990. * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2991. * in exception path in special case to avoid regular exception path check.
  2992. * @soc: DP soc handle
  2993. * @vdev_id: id of DP vdev handle
  2994. * @nbuf: skb
  2995. * @tx_exc_metadata: Handle that holds exception path meta data
  2996. *
  2997. * Entry point for Core Tx layer (DP_TX) invoked from
  2998. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2999. *
  3000. * Return: NULL on success,
  3001. * nbuf when it fails to send
  3002. */
  3003. qdf_nbuf_t
  3004. dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
  3005. uint8_t vdev_id, qdf_nbuf_t nbuf,
  3006. struct cdp_tx_exception_metadata *tx_exc_metadata)
  3007. {
  3008. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3009. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3010. DP_MOD_ID_TX_EXCEPTION);
  3011. if (qdf_unlikely(!vdev))
  3012. goto fail;
  3013. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  3014. == QDF_STATUS_E_FAILURE)) {
  3015. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  3016. goto fail;
  3017. }
3018. /* Drop the vdev reference here since it is taken again inside dp_tx_send_exception() */
  3019. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  3020. return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
  3021. fail:
  3022. if (vdev)
  3023. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  3024. dp_verbose_debug("pkt send failed");
  3025. return nbuf;
  3026. }
  3027. /**
  3028. * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
  3029. * @soc: DP soc handle
  3030. * @vdev_id: DP vdev handle
  3031. * @nbuf: skb
  3032. *
  3033. * Entry point for Core Tx layer (DP_TX) invoked from
  3034. * hard_start_xmit in OSIF/HDD
  3035. *
  3036. * Return: NULL on success,
  3037. * nbuf when it fails to send
  3038. */
  3039. #ifdef MESH_MODE_SUPPORT
  3040. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3041. qdf_nbuf_t nbuf)
  3042. {
  3043. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3044. struct meta_hdr_s *mhdr;
  3045. qdf_nbuf_t nbuf_mesh = NULL;
  3046. qdf_nbuf_t nbuf_clone = NULL;
  3047. struct dp_vdev *vdev;
  3048. uint8_t no_enc_frame = 0;
  3049. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  3050. if (!nbuf_mesh) {
  3051. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3052. "qdf_nbuf_unshare failed");
  3053. return nbuf;
  3054. }
  3055. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  3056. if (!vdev) {
  3057. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3058. "vdev is NULL for vdev_id %d", vdev_id);
  3059. return nbuf;
  3060. }
  3061. nbuf = nbuf_mesh;
  3062. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  3063. if ((vdev->sec_type != cdp_sec_type_none) &&
  3064. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  3065. no_enc_frame = 1;
  3066. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  3067. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  3068. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  3069. !no_enc_frame) {
  3070. nbuf_clone = qdf_nbuf_clone(nbuf);
  3071. if (!nbuf_clone) {
  3072. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3073. "qdf_nbuf_clone failed");
  3074. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  3075. return nbuf;
  3076. }
  3077. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  3078. }
  3079. if (nbuf_clone) {
  3080. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  3081. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  3082. } else {
  3083. qdf_nbuf_free(nbuf_clone);
  3084. }
  3085. }
  3086. if (no_enc_frame)
  3087. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  3088. else
  3089. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  3090. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  3091. if ((!nbuf) && no_enc_frame) {
  3092. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  3093. }
  3094. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  3095. return nbuf;
  3096. }
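/*
 * Summary of the mesh Tx flow above: the nbuf is unshared, NOQOS frames are
 * given the non-QoS mcast/bcast TID, and when the meta header carries
 * METAHDR_FLAG_INFO_UPDATED and the frame is not marked no-encrypt, a clone
 * tagged CB_FTYPE_MESH_TX_INFO is sent first (counted as
 * tx_i.mesh.exception_fw on success). The original frame is tagged
 * CB_FTYPE_MESH_TX_INFO only for no-encrypt frames and is then sent through
 * the regular dp_tx_send() path.
 */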
  3097. #else
  3098. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
  3099. qdf_nbuf_t nbuf)
  3100. {
  3101. return dp_tx_send(soc, vdev_id, nbuf);
  3102. }
  3103. #endif
  3104. #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
  3105. static inline
  3106. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  3107. {
  3108. if (nbuf) {
  3109. qdf_prefetch(&nbuf->len);
  3110. qdf_prefetch(&nbuf->data);
  3111. }
  3112. }
  3113. #else
  3114. static inline
  3115. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  3116. {
  3117. }
  3118. #endif
  3119. #ifdef DP_UMAC_HW_RESET_SUPPORT
  3120. /*
  3121. * dp_tx_drop() - Drop the frame on a given VAP
  3122. * @soc: DP soc handle
  3123. * @vdev_id: id of DP vdev handle
  3124. * @nbuf: skb
  3125. *
  3126. * Drop all the incoming packets
  3127. *
  3128. * Return: nbuf
  3129. *
  3130. */
  3131. qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3132. qdf_nbuf_t nbuf)
  3133. {
  3134. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3135. struct dp_vdev *vdev = NULL;
  3136. vdev = soc->vdev_id_map[vdev_id];
  3137. if (qdf_unlikely(!vdev))
  3138. return nbuf;
  3139. DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
  3140. return nbuf;
  3141. }
  3142. /*
  3143. * dp_tx_exc_drop() - Drop the frame on a given VAP
  3144. * @soc: DP soc handle
  3145. * @vdev_id: id of DP vdev handle
  3146. * @nbuf: skb
  3147. * @tx_exc_metadata: Handle that holds exception path meta data
  3148. *
  3149. * Drop all the incoming packets
  3150. *
  3151. * Return: nbuf
  3152. *
  3153. */
  3154. qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3155. qdf_nbuf_t nbuf,
  3156. struct cdp_tx_exception_metadata *tx_exc_metadata)
  3157. {
  3158. return dp_tx_drop(soc_hdl, vdev_id, nbuf);
  3159. }
  3160. #endif
  3161. /*
  3162. * dp_tx_send() - Transmit a frame on a given VAP
  3163. * @soc: DP soc handle
  3164. * @vdev_id: id of DP vdev handle
  3165. * @nbuf: skb
  3166. *
  3167. * Entry point for Core Tx layer (DP_TX) invoked from
  3168. * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
  3169. * cases
  3170. *
  3171. * Return: NULL on success,
  3172. * nbuf when it fails to send
  3173. */
  3174. qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3175. qdf_nbuf_t nbuf)
  3176. {
  3177. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3178. uint16_t peer_id = HTT_INVALID_PEER;
  3179. /*
3180. * a memzero here adds function call overhead in the per-packet path,
3181. * so the structure is cleared via static stack initialization instead
  3182. */
  3183. struct dp_tx_msdu_info_s msdu_info = {0};
  3184. struct dp_vdev *vdev = NULL;
  3185. qdf_nbuf_t end_nbuf = NULL;
  3186. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3187. return nbuf;
  3188. /*
3189. * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
3190. * it in the per-packet path.
3191. *
3192. * In this path the vdev memory is already protected by the netdev
3193. * tx lock
  3194. */
  3195. vdev = soc->vdev_id_map[vdev_id];
  3196. if (qdf_unlikely(!vdev))
  3197. return nbuf;
  3198. /*
  3199. * Set Default Host TID value to invalid TID
  3200. * (TID override disabled)
  3201. */
  3202. msdu_info.tid = HTT_TX_EXT_TID_INVALID;
  3203. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
  3204. if (qdf_unlikely(vdev->mesh_vdev)) {
  3205. qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
  3206. &msdu_info);
  3207. if (!nbuf_mesh) {
  3208. dp_verbose_debug("Extracting mesh metadata failed");
  3209. return nbuf;
  3210. }
  3211. nbuf = nbuf_mesh;
  3212. }
  3213. /*
  3214. * Get HW Queue to use for this frame.
3215. * TCL supports up to 4 DMA rings, out of which 3 rings are
  3216. * dedicated for data and 1 for command.
  3217. * "queue_id" maps to one hardware ring.
  3218. * With each ring, we also associate a unique Tx descriptor pool
  3219. * to minimize lock contention for these resources.
  3220. */
  3221. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  3222. DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
  3223. 1);
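/*
 * Example (illustrative): if dp_tx_get_queue() selects ring_id 1, the frame
 * is enqueued on TCL data ring 1 and its descriptor is allocated from the
 * Tx descriptor pool associated with that ring; the same desc_pool_id
 * indexes the per-core rcvd counter updated above.
 */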
  3224. /*
  3225. * TCL H/W supports 2 DSCP-TID mapping tables.
  3226. * Table 1 - Default DSCP-TID mapping table
  3227. * Table 2 - 1 DSCP-TID override table
  3228. *
  3229. * If we need a different DSCP-TID mapping for this vap,
  3230. * call tid_classify to extract DSCP/ToS from frame and
  3231. * map to a TID and store in msdu_info. This is later used
  3232. * to fill in TCL Input descriptor (per-packet TID override).
  3233. */
  3234. dp_tx_classify_tid(vdev, nbuf, &msdu_info);
  3235. /*
  3236. * Classify the frame and call corresponding
  3237. * "prepare" function which extracts the segment (TSO)
  3238. * and fragmentation information (for TSO , SG, ME, or Raw)
  3239. * into MSDU_INFO structure which is later used to fill
  3240. * SW and HW descriptors.
  3241. */
  3242. if (qdf_nbuf_is_tso(nbuf)) {
  3243. dp_verbose_debug("TSO frame %pK", vdev);
  3244. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  3245. qdf_nbuf_len(nbuf));
  3246. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  3247. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  3248. qdf_nbuf_len(nbuf));
  3249. return nbuf;
  3250. }
  3251. goto send_multiple;
  3252. }
  3253. /* SG */
  3254. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  3255. if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
  3256. if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
  3257. return nbuf;
  3258. } else {
  3259. struct dp_tx_seg_info_s seg_info = {0};
  3260. if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
  3261. goto send_single;
  3262. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
  3263. &msdu_info);
  3264. if (!nbuf)
  3265. return NULL;
  3266. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  3267. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  3268. qdf_nbuf_len(nbuf));
  3269. goto send_multiple;
  3270. }
  3271. }
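/*
 * Decision above: skbs with more than DP_TX_MAX_NUM_FRAGS - 1 fragments
 * are linearized and sent as a single buffer; smaller nonlinear skbs are
 * prepared as SG frames (MSDU extension descriptor), except rmnet-style
 * frames which fall through to the single-send path.
 */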
  3272. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  3273. return NULL;
  3274. if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
  3275. return nbuf;
  3276. /* RAW */
  3277. if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
  3278. struct dp_tx_seg_info_s seg_info = {0};
  3279. nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
  3280. if (!nbuf)
  3281. return NULL;
  3282. dp_verbose_debug("Raw frame %pK", vdev);
  3283. goto send_multiple;
  3284. }
  3285. if (qdf_unlikely(vdev->nawds_enabled)) {
  3286. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  3287. qdf_nbuf_data(nbuf);
  3288. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
  3289. uint16_t sa_peer_id = DP_INVALID_PEER;
  3290. if (!soc->ast_offload_support) {
  3291. struct dp_ast_entry *ast_entry = NULL;
  3292. qdf_spin_lock_bh(&soc->ast_lock);
  3293. ast_entry = dp_peer_ast_hash_find_by_pdevid
  3294. (soc,
  3295. (uint8_t *)(eh->ether_shost),
  3296. vdev->pdev->pdev_id);
  3297. if (ast_entry)
  3298. sa_peer_id = ast_entry->peer_id;
  3299. qdf_spin_unlock_bh(&soc->ast_lock);
  3300. }
  3301. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  3302. sa_peer_id);
  3303. }
  3304. peer_id = DP_INVALID_PEER;
  3305. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  3306. 1, qdf_nbuf_len(nbuf));
  3307. }
  3308. send_single:
  3309. /* Single linear frame */
  3310. /*
  3311. * If nbuf is a simple linear frame, use send_single function to
  3312. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  3313. * SRNG. There is no need to setup a MSDU extension descriptor.
  3314. */
  3315. dp_tx_prefetch_nbuf_data(nbuf);
  3316. nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
  3317. peer_id, end_nbuf);
  3318. return nbuf;
  3319. send_multiple:
  3320. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  3321. if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
  3322. dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
  3323. return nbuf;
  3324. }
  3325. /**
  3326. * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3327. * case to avoid the per-packet check.
  3328. * @soc: DP soc handle
  3329. * @vdev_id: id of DP vdev handle
  3330. * @nbuf: skb
  3331. *
  3332. * Entry point for Core Tx layer (DP_TX) invoked from
  3333. * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
  3334. * with special condition to avoid per pkt check in dp_tx_send
  3335. *
  3336. * Return: NULL on success,
  3337. * nbuf when it fails to send
  3338. */
  3339. qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
  3340. uint8_t vdev_id, qdf_nbuf_t nbuf)
  3341. {
  3342. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3343. struct dp_vdev *vdev = NULL;
  3344. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3345. return nbuf;
  3346. /*
3347. * dp_vdev_get_ref_by_id does an atomic operation, so avoid using
3348. * it in the per-packet path.
3349. *
3350. * In this path the vdev memory is already protected by the netdev
3351. * tx lock
  3352. */
  3353. vdev = soc->vdev_id_map[vdev_id];
  3354. if (qdf_unlikely(!vdev))
  3355. return nbuf;
  3356. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  3357. == QDF_STATUS_E_FAILURE)) {
  3358. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  3359. return nbuf;
  3360. }
  3361. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  3362. }
  3363. #ifdef UMAC_SUPPORT_PROXY_ARP
  3364. /**
  3365. * dp_tx_proxy_arp() - Tx proxy arp handler
  3366. * @vdev: datapath vdev handle
  3367. * @buf: sk buffer
  3368. *
  3369. * Return: status
  3370. */
  3371. static inline
  3372. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3373. {
  3374. if (vdev->osif_proxy_arp)
  3375. return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
  3376. /*
  3377. * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
3378. * osif_proxy_arp to have a valid function pointer assigned
3379. * to it
  3380. */
  3381. dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
  3382. return QDF_STATUS_NOT_INITIALIZED;
  3383. }
  3384. #else
  3385. /**
  3386. * dp_tx_proxy_arp() - Tx proxy arp handler
  3387. * @vdev: datapath vdev handle
  3388. * @buf: sk buffer
  3389. *
3390. * This function always returns QDF_STATUS_SUCCESS when
3391. * UMAC_SUPPORT_PROXY_ARP is not defined.
  3392. *
  3393. * Return: status
  3394. */
  3395. static inline
  3396. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3397. {
  3398. return QDF_STATUS_SUCCESS;
  3399. }
  3400. #endif
  3401. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  3402. #ifdef WLAN_MCAST_MLO
  3403. static bool
  3404. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3405. struct dp_tx_desc_s *tx_desc,
  3406. qdf_nbuf_t nbuf,
  3407. uint8_t reinject_reason)
  3408. {
  3409. if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
  3410. if (soc->arch_ops.dp_tx_mcast_handler)
  3411. soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
  3412. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3413. return true;
  3414. }
  3415. return false;
  3416. }
  3417. #else /* WLAN_MCAST_MLO */
  3418. static inline bool
  3419. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3420. struct dp_tx_desc_s *tx_desc,
  3421. qdf_nbuf_t nbuf,
  3422. uint8_t reinject_reason)
  3423. {
  3424. return false;
  3425. }
  3426. #endif /* WLAN_MCAST_MLO */
  3427. #else
  3428. static inline bool
  3429. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3430. struct dp_tx_desc_s *tx_desc,
  3431. qdf_nbuf_t nbuf,
  3432. uint8_t reinject_reason)
  3433. {
  3434. return false;
  3435. }
  3436. #endif
  3437. /**
  3438. * dp_tx_reinject_handler() - Tx Reinject Handler
  3439. * @soc: datapath soc handle
  3440. * @vdev: datapath vdev handle
  3441. * @tx_desc: software descriptor head pointer
  3442. * @status : Tx completion status from HTT descriptor
  3443. * @reinject_reason : reinject reason from HTT descriptor
  3444. *
  3445. * This function reinjects frames back to Target.
  3446. * Todo - Host queue needs to be added
  3447. *
  3448. * Return: none
  3449. */
  3450. void dp_tx_reinject_handler(struct dp_soc *soc,
  3451. struct dp_vdev *vdev,
  3452. struct dp_tx_desc_s *tx_desc,
  3453. uint8_t *status,
  3454. uint8_t reinject_reason)
  3455. {
  3456. struct dp_peer *peer = NULL;
  3457. uint32_t peer_id = HTT_INVALID_PEER;
  3458. qdf_nbuf_t nbuf = tx_desc->nbuf;
  3459. qdf_nbuf_t nbuf_copy = NULL;
  3460. struct dp_tx_msdu_info_s msdu_info;
  3461. #ifdef WDS_VENDOR_EXTENSION
  3462. int is_mcast = 0, is_ucast = 0;
  3463. int num_peers_3addr = 0;
  3464. qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
  3465. struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
  3466. #endif
  3467. struct dp_txrx_peer *txrx_peer;
  3468. qdf_assert(vdev);
  3469. dp_tx_debug("Tx reinject path");
  3470. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  3471. qdf_nbuf_len(tx_desc->nbuf));
  3472. if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
  3473. return;
  3474. #ifdef WDS_VENDOR_EXTENSION
  3475. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  3476. is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
  3477. } else {
  3478. is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
  3479. }
  3480. is_ucast = !is_mcast;
  3481. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3482. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3483. txrx_peer = dp_get_txrx_peer(peer);
  3484. if (!txrx_peer || txrx_peer->bss_peer)
  3485. continue;
  3486. /* Detect wds peers that use 3-addr framing for mcast.
3487. * If there are any, the bss_peer is used to send the
3488. * mcast frame using 3-addr format. All wds enabled
3489. * peers that use 4-addr framing for mcast frames will
3490. * be duplicated and sent as 4-addr frames below.
  3491. */
  3492. if (!txrx_peer->wds_enabled ||
  3493. !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
  3494. num_peers_3addr = 1;
  3495. break;
  3496. }
  3497. }
  3498. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3499. #endif
  3500. if (qdf_unlikely(vdev->mesh_vdev)) {
  3501. DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
  3502. } else {
  3503. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3504. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3505. txrx_peer = dp_get_txrx_peer(peer);
  3506. if (!txrx_peer)
  3507. continue;
  3508. if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
  3509. #ifdef WDS_VENDOR_EXTENSION
  3510. /*
  3511. * . if 3-addr STA, then send on BSS Peer
  3512. * . if Peer WDS enabled and accept 4-addr mcast,
  3513. * send mcast on that peer only
  3514. * . if Peer WDS enabled and accept 4-addr ucast,
  3515. * send ucast on that peer only
  3516. */
  3517. ((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
  3518. (txrx_peer->wds_enabled &&
  3519. ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
  3520. (is_ucast &&
  3521. txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
  3522. #else
  3523. (txrx_peer->bss_peer &&
  3524. (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
  3525. #endif
  3526. peer_id = DP_INVALID_PEER;
  3527. nbuf_copy = qdf_nbuf_copy(nbuf);
  3528. if (!nbuf_copy) {
  3529. dp_tx_debug("nbuf copy failed");
  3530. break;
  3531. }
  3532. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  3533. dp_tx_get_queue(vdev, nbuf,
  3534. &msdu_info.tx_queue);
  3535. nbuf_copy = dp_tx_send_msdu_single(vdev,
  3536. nbuf_copy,
  3537. &msdu_info,
  3538. peer_id,
  3539. NULL);
  3540. if (nbuf_copy) {
  3541. dp_tx_debug("pkt send failed");
  3542. qdf_nbuf_free(nbuf_copy);
  3543. }
  3544. }
  3545. }
  3546. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3547. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  3548. QDF_DMA_TO_DEVICE, nbuf->len);
  3549. qdf_nbuf_free(nbuf);
  3550. }
  3551. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3552. }
  3553. /**
  3554. * dp_tx_inspect_handler() - Tx Inspect Handler
  3555. * @soc: datapath soc handle
  3556. * @vdev: datapath vdev handle
  3557. * @tx_desc: software descriptor head pointer
  3558. * @status : Tx completion status from HTT descriptor
  3559. *
  3560. * Handles Tx frames sent back to Host for inspection
  3561. * (ProxyARP)
  3562. *
  3563. * Return: none
  3564. */
  3565. void dp_tx_inspect_handler(struct dp_soc *soc,
  3566. struct dp_vdev *vdev,
  3567. struct dp_tx_desc_s *tx_desc,
  3568. uint8_t *status)
  3569. {
  3570. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3571. "%s Tx inspect path",
  3572. __func__);
  3573. DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
  3574. qdf_nbuf_len(tx_desc->nbuf));
  3575. DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
  3576. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3577. }
  3578. #ifdef MESH_MODE_SUPPORT
  3579. /**
  3580. * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
  3581. * in mesh meta header
  3582. * @tx_desc: software descriptor head pointer
  3583. * @ts: pointer to tx completion stats
  3584. * Return: none
  3585. */
  3586. static
  3587. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3588. struct hal_tx_completion_status *ts)
  3589. {
  3590. qdf_nbuf_t netbuf = tx_desc->nbuf;
  3591. if (!tx_desc->msdu_ext_desc) {
  3592. if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
  3593. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3594. "netbuf %pK offset %d",
  3595. netbuf, tx_desc->pkt_offset);
  3596. return;
  3597. }
  3598. }
  3599. }
  3600. #else
  3601. static
  3602. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3603. struct hal_tx_completion_status *ts)
  3604. {
  3605. }
  3606. #endif
  3607. #ifdef CONFIG_SAWF
  3608. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3609. struct dp_vdev *vdev,
  3610. struct dp_txrx_peer *txrx_peer,
  3611. struct dp_tx_desc_s *tx_desc,
  3612. struct hal_tx_completion_status *ts,
  3613. uint8_t tid)
  3614. {
  3615. dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
  3616. ts, tid);
  3617. }
  3618. static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3619. uint32_t nw_delay,
  3620. uint32_t sw_delay,
  3621. uint32_t hw_delay)
  3622. {
  3623. dp_peer_tid_delay_avg(tx_delay,
  3624. nw_delay,
  3625. sw_delay,
  3626. hw_delay);
  3627. }
  3628. #else
  3629. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3630. struct dp_vdev *vdev,
  3631. struct dp_txrx_peer *txrx_peer,
  3632. struct dp_tx_desc_s *tx_desc,
  3633. struct hal_tx_completion_status *ts,
  3634. uint8_t tid)
  3635. {
  3636. }
  3637. static inline void
  3638. dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3639. uint32_t nw_delay, uint32_t sw_delay,
  3640. uint32_t hw_delay)
  3641. {
  3642. }
  3643. #endif
  3644. #ifdef QCA_PEER_EXT_STATS
  3645. #ifdef WLAN_CONFIG_TX_DELAY
  3646. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3647. struct dp_tx_desc_s *tx_desc,
  3648. struct hal_tx_completion_status *ts,
  3649. struct dp_vdev *vdev)
  3650. {
  3651. struct dp_soc *soc = vdev->pdev->soc;
  3652. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3653. int64_t timestamp_ingress, timestamp_hw_enqueue;
  3654. uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
  3655. if (!ts->valid)
  3656. return;
  3657. timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
  3658. timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
  3659. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3660. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3661. if (soc->arch_ops.dp_tx_compute_hw_delay)
  3662. if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
  3663. &fwhw_transmit_delay))
  3664. dp_hist_update_stats(&tx_delay->hwtx_delay,
  3665. fwhw_transmit_delay);
  3666. dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
  3667. fwhw_transmit_delay);
  3668. }
  3669. #else
  3670. /*
  3671. * dp_tx_compute_tid_delay() - Compute per TID delay
  3672. * @stats: Per TID delay stats
  3673. * @tx_desc: Software Tx descriptor
  3674. * @ts: Tx completion status
  3675. * @vdev: vdev
  3676. *
  3677. * Compute the software enqueue and hw enqueue delays and
  3678. * update the respective histograms
  3679. *
  3680. * Return: void
  3681. */
  3682. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3683. struct dp_tx_desc_s *tx_desc,
  3684. struct hal_tx_completion_status *ts,
  3685. struct dp_vdev *vdev)
  3686. {
  3687. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3688. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3689. uint32_t sw_enqueue_delay, fwhw_transmit_delay;
  3690. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3691. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3692. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3693. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3694. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3695. timestamp_hw_enqueue);
  3696. /*
  3697. * Update the Tx software enqueue delay and HW enque-Completion delay.
  3698. */
  3699. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3700. dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
  3701. }
  3702. #endif
  3703. /*
  3704. * dp_tx_update_peer_delay_stats() - Update the peer delay stats
  3705. * @txrx_peer: DP peer context
  3706. * @tx_desc: Tx software descriptor
3707. * @ts: Tx completion status
3708. * @ring_id: ring number (Tx completion CPU context)
3709. *
3710. * Update the peer extended stats. These are per-MSDU delay stats
3711. * maintained in addition to the regular delay stats.
  3712. *
  3713. * Return: void
  3714. */
  3715. static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3716. struct dp_tx_desc_s *tx_desc,
  3717. struct hal_tx_completion_status *ts,
  3718. uint8_t ring_id)
  3719. {
  3720. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3721. struct dp_soc *soc = NULL;
  3722. struct dp_peer_delay_stats *delay_stats = NULL;
  3723. uint8_t tid;
  3724. soc = pdev->soc;
  3725. if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
  3726. return;
  3727. tid = ts->tid;
  3728. delay_stats = txrx_peer->delay_stats;
  3729. qdf_assert(delay_stats);
3730. qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3731. /*
  3732. * For non-TID packets use the TID 9
  3733. */
  3734. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3735. tid = CDP_MAX_DATA_TIDS - 1;
  3736. dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
  3737. tx_desc, ts, txrx_peer->vdev);
  3738. }
  3739. #else
  3740. static inline
  3741. void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3742. struct dp_tx_desc_s *tx_desc,
  3743. struct hal_tx_completion_status *ts,
  3744. uint8_t ring_id)
  3745. {
  3746. }
  3747. #endif
  3748. #ifdef WLAN_PEER_JITTER
  3749. /*
  3750. * dp_tx_jitter_get_avg_jitter() - compute the average jitter
  3751. * @curr_delay: Current delay
3752. * @prev_delay: Previous delay
  3753. * @avg_jitter: Average Jitter
  3754. * Return: Newly Computed Average Jitter
  3755. */
  3756. static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
  3757. uint32_t prev_delay,
  3758. uint32_t avg_jitter)
  3759. {
  3760. uint32_t curr_jitter;
  3761. int32_t jitter_diff;
  3762. curr_jitter = qdf_abs(curr_delay - prev_delay);
  3763. if (!avg_jitter)
  3764. return curr_jitter;
  3765. jitter_diff = curr_jitter - avg_jitter;
  3766. if (jitter_diff < 0)
  3767. avg_jitter = avg_jitter -
  3768. (qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
  3769. else
  3770. avg_jitter = avg_jitter +
  3771. (qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
  3772. return avg_jitter;
  3773. }
  3774. /*
  3775. * dp_tx_jitter_get_avg_delay() - compute the average delay
  3776. * @curr_delay: Current delay
3777. * @avg_delay: Average delay
  3778. * Return: Newly Computed Average Delay
  3779. */
  3780. static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
  3781. uint32_t avg_delay)
  3782. {
  3783. int32_t delay_diff;
  3784. if (!avg_delay)
  3785. return curr_delay;
  3786. delay_diff = curr_delay - avg_delay;
  3787. if (delay_diff < 0)
  3788. avg_delay = avg_delay - (qdf_abs(delay_diff) >>
  3789. DP_AVG_DELAY_WEIGHT_DENOM);
  3790. else
  3791. avg_delay = avg_delay + (qdf_abs(delay_diff) >>
  3792. DP_AVG_DELAY_WEIGHT_DENOM);
  3793. return avg_delay;
  3794. }
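/*
 * Both helpers above implement an exponentially weighted moving average:
 * the running value moves toward the new sample by |sample - avg| >> DENOM
 * on every update. Illustration only (the actual shift values come from
 * DP_AVG_JITTER_WEIGHT_DENOM / DP_AVG_DELAY_WEIGHT_DENOM, not shown here):
 * with a shift of 3, avg_delay = 800 us and curr_delay = 1600 us, the new
 * average is 800 + ((1600 - 800) >> 3) = 900 us.
 */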
  3795. #ifdef WLAN_CONFIG_TX_DELAY
  3796. /*
  3797. * dp_tx_compute_cur_delay() - get the current delay
  3798. * @soc: soc handle
  3799. * @vdev: vdev structure for data path state
  3800. * @ts: Tx completion status
  3801. * @curr_delay: current delay
  3802. * @tx_desc: tx descriptor
  3803. * Return: void
  3804. */
  3805. static
  3806. QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
  3807. struct dp_vdev *vdev,
  3808. struct hal_tx_completion_status *ts,
  3809. uint32_t *curr_delay,
  3810. struct dp_tx_desc_s *tx_desc)
  3811. {
  3812. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  3813. if (soc->arch_ops.dp_tx_compute_hw_delay)
  3814. status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
  3815. curr_delay);
  3816. return status;
  3817. }
  3818. #else
  3819. static
  3820. QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
  3821. struct dp_vdev *vdev,
  3822. struct hal_tx_completion_status *ts,
  3823. uint32_t *curr_delay,
  3824. struct dp_tx_desc_s *tx_desc)
  3825. {
  3826. int64_t current_timestamp, timestamp_hw_enqueue;
  3827. current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
  3828. timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
  3829. *curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
  3830. return QDF_STATUS_SUCCESS;
  3831. }
  3832. #endif
  3833. /* dp_tx_compute_tid_jitter() - compute per tid per ring jitter
3834. * @jitter: per tid per ring jitter stats
3835. * @ts: Tx completion status
3836. * @vdev: vdev structure for data path state
3837. * @tx_desc: tx descriptor
  3838. * Return: void
  3839. */
  3840. static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
  3841. struct hal_tx_completion_status *ts,
  3842. struct dp_vdev *vdev,
  3843. struct dp_tx_desc_s *tx_desc)
  3844. {
  3845. uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
  3846. struct dp_soc *soc = vdev->pdev->soc;
  3847. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  3848. if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
  3849. jitter->tx_drop += 1;
  3850. return;
  3851. }
  3852. status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
  3853. tx_desc);
  3854. if (QDF_IS_STATUS_SUCCESS(status)) {
  3855. avg_delay = jitter->tx_avg_delay;
  3856. avg_jitter = jitter->tx_avg_jitter;
  3857. prev_delay = jitter->tx_prev_delay;
  3858. avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
  3859. prev_delay,
  3860. avg_jitter);
  3861. avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
  3862. jitter->tx_avg_delay = avg_delay;
  3863. jitter->tx_avg_jitter = avg_jitter;
  3864. jitter->tx_prev_delay = curr_delay;
  3865. jitter->tx_total_success += 1;
  3866. } else if (status == QDF_STATUS_E_FAILURE) {
  3867. jitter->tx_avg_err += 1;
  3868. }
  3869. }
  3870. /* dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
  3871. * @txrx_peer: DP peer context
  3872. * @tx_desc: Tx software descriptor
  3873. * @ts: Tx completion status
3874. * @ring_id: ring number (Tx completion CPU context)
  3875. * Return: void
  3876. */
  3877. static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
  3878. struct dp_tx_desc_s *tx_desc,
  3879. struct hal_tx_completion_status *ts,
  3880. uint8_t ring_id)
  3881. {
  3882. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3883. struct dp_soc *soc = pdev->soc;
  3884. struct cdp_peer_tid_stats *jitter_stats = NULL;
  3885. uint8_t tid;
  3886. struct cdp_peer_tid_stats *rx_tid = NULL;
  3887. if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
  3888. return;
  3889. tid = ts->tid;
  3890. jitter_stats = txrx_peer->jitter_stats;
  3891. qdf_assert_always(jitter_stats);
3892. qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3893. /*
  3894. * For non-TID packets use the TID 9
  3895. */
  3896. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3897. tid = CDP_MAX_DATA_TIDS - 1;
  3898. rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
  3899. dp_tx_compute_tid_jitter(rx_tid,
  3900. ts, txrx_peer->vdev, tx_desc);
  3901. }
  3902. #else
  3903. static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
  3904. struct dp_tx_desc_s *tx_desc,
  3905. struct hal_tx_completion_status *ts,
  3906. uint8_t ring_id)
  3907. {
  3908. }
  3909. #endif
  3910. #ifdef HW_TX_DELAY_STATS_ENABLE
  3911. /**
  3912. * dp_update_tx_delay_stats() - update the delay stats
  3913. * @vdev: vdev handle
  3914. * @delay: delay in ms or us based on the flag delay_in_us
  3915. * @tid: tid value
  3916. * @mode: type of tx delay mode
3917. * @ring_id: ring number
  3918. * @delay_in_us: flag to indicate whether the delay is in ms or us
  3919. *
  3920. * Return: none
  3921. */
  3922. static inline
  3923. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3924. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3925. {
  3926. struct cdp_tid_tx_stats *tstats =
  3927. &vdev->stats.tid_tx_stats[ring_id][tid];
  3928. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3929. delay_in_us);
  3930. }
  3931. #else
  3932. static inline
  3933. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3934. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3935. {
  3936. struct cdp_tid_tx_stats *tstats =
  3937. &vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3938. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3939. delay_in_us);
  3940. }
  3941. #endif
  3942. /**
3943. * dp_tx_compute_delay() - Compute the Tx path delays from the recorded
3944. * timestamps and update the delay stats
3945. *
3946. * @vdev: vdev handle
  3947. * @tx_desc: tx descriptor
  3948. * @tid: tid value
  3949. * @ring_id: TCL or WBM ring number for transmit path
  3950. * Return: none
  3951. */
  3952. void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
  3953. uint8_t tid, uint8_t ring_id)
  3954. {
  3955. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3956. uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
  3957. uint32_t fwhw_transmit_delay_us;
  3958. if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
  3959. qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
  3960. return;
  3961. if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
  3962. fwhw_transmit_delay_us =
  3963. qdf_ktime_to_us(qdf_ktime_real_get()) -
  3964. qdf_ktime_to_us(tx_desc->timestamp);
  3965. /*
  3966. * Delay between packet enqueued to HW and Tx completion in us
  3967. */
  3968. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
  3969. CDP_DELAY_STATS_FW_HW_TRANSMIT,
  3970. ring_id, true);
  3971. /*
3972. * For MCL, only the HW-enqueue-to-completion delay is required,
3973. * so return here when the per-vdev delay-stats flag is enabled.
  3974. */
  3975. return;
  3976. }
  3977. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3978. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3979. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3980. timestamp_hw_enqueue);
  3981. /*
  3982. * Delay between packet enqueued to HW and Tx completion in ms
  3983. */
  3984. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
  3985. CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
  3986. false);
  3987. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3988. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3989. interframe_delay = (uint32_t)(timestamp_ingress -
  3990. vdev->prev_tx_enq_tstamp);
  3991. /*
  3992. * Delay in software enqueue
  3993. */
  3994. dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
  3995. CDP_DELAY_STATS_SW_ENQ, ring_id,
  3996. false);
  3997. /*
  3998. * Update interframe delay stats calculated at hardstart receive point.
  3999. * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
4000. * interframe delay will not be calculated correctly for the 1st frame.
4001. * On the other hand, this helps avoid an extra per-packet check
  4002. * of !vdev->prev_tx_enq_tstamp.
  4003. */
  4004. dp_update_tx_delay_stats(vdev, interframe_delay, tid,
  4005. CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
  4006. false);
  4007. vdev->prev_tx_enq_tstamp = timestamp_ingress;
  4008. }
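/*
 * Worked example for the ms-resolution path in dp_tx_compute_delay() above
 * (timestamps illustrative): ingress at t = 100, HW enqueue at t = 102,
 * completion processed at t = 110, previous frame's ingress at t = 95:
 *   fwhw_transmit_delay = 110 - 102 = 8 ms  (CDP_DELAY_STATS_FW_HW_TRANSMIT)
 *   sw_enqueue_delay    = 102 - 100 = 2 ms  (CDP_DELAY_STATS_SW_ENQ)
 *   interframe_delay    = 100 -  95 = 5 ms  (CDP_DELAY_STATS_TX_INTERFRAME)
 * vdev->prev_tx_enq_tstamp is then updated to 100 for the next frame.
 */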
  4009. #ifdef DISABLE_DP_STATS
  4010. static
  4011. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
  4012. struct dp_txrx_peer *txrx_peer)
  4013. {
  4014. }
  4015. #else
  4016. static inline void
  4017. dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
  4018. {
  4019. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  4020. DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
  4021. if (subtype != QDF_PROTO_INVALID)
  4022. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
  4023. 1);
  4024. }
  4025. #endif
  4026. #ifndef QCA_ENHANCED_STATS_SUPPORT
  4027. #ifdef DP_PEER_EXTENDED_API
  4028. static inline uint8_t
  4029. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  4030. {
  4031. return txrx_peer->mpdu_retry_threshold;
  4032. }
  4033. #else
  4034. static inline uint8_t
  4035. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  4036. {
  4037. return 0;
  4038. }
  4039. #endif
  4040. /**
  4041. * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
  4042. *
4043. * @ts: Tx completion status
  4044. * @txrx_peer: datapath txrx_peer handle
  4045. *
  4046. * Return: void
  4047. */
  4048. static inline void
  4049. dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
  4050. struct dp_txrx_peer *txrx_peer)
  4051. {
  4052. uint8_t mcs, pkt_type, dst_mcs_idx;
  4053. uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
  4054. mcs = ts->mcs;
  4055. pkt_type = ts->pkt_type;
  4056. /* do HW to SW pkt type conversion */
  4057. pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
  4058. hal_2_dp_pkt_type_map[pkt_type]);
  4059. dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
  4060. if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
  4061. DP_PEER_EXTD_STATS_INC(txrx_peer,
  4062. tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
  4063. 1);
  4064. DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
  4065. DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
  4066. DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
  4067. DP_PEER_EXTD_STATS_INC(txrx_peer,
  4068. tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
  4069. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
  4070. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
  4071. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
  4072. if (ts->first_msdu) {
  4073. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
  4074. ts->transmit_cnt > 1);
  4075. if (!retry_threshold)
  4076. return;
  4077. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
  4078. qdf_do_div(ts->transmit_cnt,
  4079. retry_threshold),
  4080. ts->transmit_cnt > retry_threshold);
  4081. }
  4082. }
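/*
 * Illustrative sketch (not part of the driver): with a hypothetical
 * mpdu_retry_threshold of 3, a first MSDU completing with
 * ts->transmit_cnt = 7 increments tx.mpdu_success_with_retries by
 * qdf_do_div(7, 3) = 2, since 7 > 3. A completion with transmit_cnt = 2
 * leaves that counter untouched because the threshold is not exceeded.
 */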
  4083. #else
  4084. static inline void
  4085. dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
  4086. struct dp_txrx_peer *txrx_peer)
  4087. {
  4088. }
  4089. #endif
  4090. /**
  4091. * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
  4092. * per wbm ring
  4093. *
  4094. * @tx_desc: software descriptor head pointer
  4095. * @ts: Tx completion status
4096. * @txrx_peer: datapath txrx peer handle
  4097. * @ring_id: ring number
  4098. *
  4099. * Return: None
  4100. */
  4101. static inline void
  4102. dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
  4103. struct hal_tx_completion_status *ts,
  4104. struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
  4105. {
  4106. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  4107. uint8_t tid = ts->tid;
  4108. uint32_t length;
  4109. struct cdp_tid_tx_stats *tid_stats;
  4110. if (!pdev)
  4111. return;
  4112. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  4113. tid = CDP_MAX_DATA_TIDS - 1;
  4114. tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  4115. if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
  4116. dp_err_rl("Release source:%d is not from TQM", ts->release_src);
  4117. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
  4118. return;
  4119. }
  4120. length = qdf_nbuf_len(tx_desc->nbuf);
  4121. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  4122. if (qdf_unlikely(pdev->delay_stats_flag) ||
  4123. qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
  4124. dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
  4125. if (ts->status < CDP_MAX_TX_TQM_STATUS) {
  4126. tid_stats->tqm_status_cnt[ts->status]++;
  4127. }
  4128. if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
  4129. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
  4130. ts->transmit_cnt > 1);
  4131. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
  4132. 1, ts->transmit_cnt > 2);
  4133. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
  4134. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
  4135. ts->msdu_part_of_amsdu);
  4136. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
  4137. !ts->msdu_part_of_amsdu);
  4138. txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
  4139. qdf_system_ticks();
  4140. dp_tx_update_peer_extd_stats(ts, txrx_peer);
  4141. return;
  4142. }
  4143. /*
  4144. * tx_failed is ideally supposed to be updated from HTT ppdu
  4145. * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
  4146. * hw limitation there are no completions for failed cases.
  4147. * Hence updating tx_failed from data path. Please note that
  4148. * if tx_failed is fixed to be from ppdu, then this has to be
  4149. * removed
  4150. */
  4151. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  4152. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
  4153. ts->transmit_cnt > DP_RETRY_COUNT);
  4154. dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
  4155. if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
  4156. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
  4157. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
  4158. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
  4159. length);
  4160. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
  4161. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
  4162. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
  4163. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
  4164. } else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
  4165. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
  4166. } else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
  4167. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
  4168. } else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
  4169. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
  4170. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
  4171. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  4172. tx.dropped.fw_rem_queue_disable, 1);
  4173. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
  4174. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  4175. tx.dropped.fw_rem_no_match, 1);
  4176. } else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
  4177. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  4178. tx.dropped.drop_threshold, 1);
  4179. } else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
  4180. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  4181. tx.dropped.drop_link_desc_na, 1);
  4182. } else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
  4183. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  4184. tx.dropped.invalid_drop, 1);
  4185. } else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
  4186. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  4187. tx.dropped.mcast_vdev_drop, 1);
  4188. } else {
  4189. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
  4190. }
  4191. }
  4192. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  4193. /**
  4194. * dp_tx_flow_pool_lock() - take flow pool lock
  4195. * @soc: core txrx main context
  4196. * @tx_desc: tx desc
  4197. *
  4198. * Return: None
  4199. */
  4200. static inline
  4201. void dp_tx_flow_pool_lock(struct dp_soc *soc,
  4202. struct dp_tx_desc_s *tx_desc)
  4203. {
  4204. struct dp_tx_desc_pool_s *pool;
  4205. uint8_t desc_pool_id;
  4206. desc_pool_id = tx_desc->pool_id;
  4207. pool = &soc->tx_desc[desc_pool_id];
  4208. qdf_spin_lock_bh(&pool->flow_pool_lock);
  4209. }
  4210. /**
  4211. * dp_tx_flow_pool_unlock() - release flow pool lock
  4212. * @soc: core txrx main context
  4213. * @tx_desc: tx desc
  4214. *
  4215. * Return: None
  4216. */
  4217. static inline
  4218. void dp_tx_flow_pool_unlock(struct dp_soc *soc,
  4219. struct dp_tx_desc_s *tx_desc)
  4220. {
  4221. struct dp_tx_desc_pool_s *pool;
  4222. uint8_t desc_pool_id;
  4223. desc_pool_id = tx_desc->pool_id;
  4224. pool = &soc->tx_desc[desc_pool_id];
  4225. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  4226. }
  4227. #else
  4228. static inline
  4229. void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  4230. {
  4231. }
  4232. static inline
  4233. void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  4234. {
  4235. }
  4236. #endif
  4237. /**
  4238. * dp_tx_notify_completion() - Notify tx completion for this desc
  4239. * @soc: core txrx main context
  4240. * @vdev: datapath vdev handle
  4241. * @tx_desc: tx desc
  4242. * @netbuf: buffer
  4243. * @status: tx status
  4244. *
  4245. * Return: none
  4246. */
  4247. static inline void dp_tx_notify_completion(struct dp_soc *soc,
  4248. struct dp_vdev *vdev,
  4249. struct dp_tx_desc_s *tx_desc,
  4250. qdf_nbuf_t netbuf,
  4251. uint8_t status)
  4252. {
  4253. void *osif_dev;
  4254. ol_txrx_completion_fp tx_compl_cbk = NULL;
  4255. uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
  4256. qdf_assert(tx_desc);
  4257. if (!vdev ||
  4258. !vdev->osif_vdev) {
  4259. return;
  4260. }
  4261. osif_dev = vdev->osif_vdev;
  4262. tx_compl_cbk = vdev->tx_comp;
  4263. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  4264. flag |= BIT(QDF_TX_RX_STATUS_OK);
  4265. if (tx_compl_cbk)
  4266. tx_compl_cbk(netbuf, osif_dev, flag);
  4267. }
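/*
 * Illustrative sketch (not part of the driver): for an acked frame the
 * completion callback above is invoked with both bits set,
 *   flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC) | BIT(QDF_TX_RX_STATUS_OK);
 * for any other TQM status only the download-success bit is reported.
 */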
4268. /** dp_tx_sojourn_stats_process() - Collect sojourn stats
4269. * @pdev: pdev handle
4270. * @txrx_peer: datapath txrx peer handle
4271. * @tid: tid value
4272. * @txdesc_ts: timestamp from txdesc
4273. * @ppdu_id: ppdu id
4274. * Return: none
4275. */
  4276. #ifdef FEATURE_PERPKT_INFO
  4277. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  4278. struct dp_txrx_peer *txrx_peer,
  4279. uint8_t tid,
  4280. uint64_t txdesc_ts,
  4281. uint32_t ppdu_id)
  4282. {
  4283. uint64_t delta_ms;
  4284. struct cdp_tx_sojourn_stats *sojourn_stats;
  4285. struct dp_peer *primary_link_peer = NULL;
  4286. struct dp_soc *link_peer_soc = NULL;
  4287. if (qdf_unlikely(!pdev->enhanced_stats_en))
  4288. return;
  4289. if (qdf_unlikely(tid == HTT_INVALID_TID ||
  4290. tid >= CDP_DATA_TID_MAX))
  4291. return;
  4292. if (qdf_unlikely(!pdev->sojourn_buf))
  4293. return;
  4294. primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
  4295. txrx_peer->peer_id,
  4296. DP_MOD_ID_TX_COMP);
  4297. if (qdf_unlikely(!primary_link_peer))
  4298. return;
  4299. sojourn_stats = (struct cdp_tx_sojourn_stats *)
  4300. qdf_nbuf_data(pdev->sojourn_buf);
  4301. link_peer_soc = primary_link_peer->vdev->pdev->soc;
  4302. sojourn_stats->cookie = (void *)
  4303. dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
  4304. primary_link_peer);
  4305. delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
  4306. txdesc_ts;
  4307. qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
  4308. delta_ms);
  4309. sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
  4310. sojourn_stats->num_msdus[tid] = 1;
  4311. sojourn_stats->avg_sojourn_msdu[tid].internal =
  4312. txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
  4313. dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
  4314. pdev->sojourn_buf, HTT_INVALID_PEER,
  4315. WDI_NO_VAL, pdev->pdev_id);
  4316. sojourn_stats->sum_sojourn_msdu[tid] = 0;
  4317. sojourn_stats->num_msdus[tid] = 0;
  4318. sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
  4319. dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
  4320. }
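/*
 * Illustrative sketch (not part of the driver), assuming example values:
 * if the descriptor was enqueued at txdesc_ts = 1000 ms and the completion
 * is processed at 1012 ms, delta_ms = 12. The per-tid EWMA is updated with
 * 12 ms (the weighting is internal to qdf_ewma_tx_lag), sum_sojourn_msdu[tid]
 * = 12 and num_msdus[tid] = 1 are reported in the single
 * WDI_EVENT_TX_SOJOURN_STAT indication, and the fields are then cleared.
 */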
  4321. #else
  4322. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  4323. struct dp_txrx_peer *txrx_peer,
  4324. uint8_t tid,
  4325. uint64_t txdesc_ts,
  4326. uint32_t ppdu_id)
  4327. {
  4328. }
  4329. #endif
  4330. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  4331. /**
  4332. * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
  4333. * @soc: dp_soc handle
  4334. * @desc: Tx Descriptor
  4335. * @ts: HAL Tx completion descriptor contents
  4336. *
  4337. * This function is used to send tx completion to packet capture
  4338. */
  4339. void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
  4340. struct dp_tx_desc_s *desc,
  4341. struct hal_tx_completion_status *ts)
  4342. {
  4343. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
  4344. desc, ts->peer_id,
  4345. WDI_NO_VAL, desc->pdev->pdev_id);
  4346. }
  4347. #endif
4348. /**
4349. * dp_tx_comp_process_desc() - Process Tx descriptor and free associated nbuf
4350. * @soc: DP soc handle
4351. * @desc: software Tx descriptor
4352. * @ts: Tx completion status from HAL/HTT descriptor
4353. * @txrx_peer: datapath txrx peer handle
4354. * Return: none
4355. */
  4356. void
  4357. dp_tx_comp_process_desc(struct dp_soc *soc,
  4358. struct dp_tx_desc_s *desc,
  4359. struct hal_tx_completion_status *ts,
  4360. struct dp_txrx_peer *txrx_peer)
  4361. {
  4362. uint64_t time_latency = 0;
  4363. uint16_t peer_id = DP_INVALID_PEER_ID;
  4364. /*
  4365. * m_copy/tx_capture modes are not supported for
  4366. * scatter gather packets
  4367. */
  4368. if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
  4369. time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
  4370. qdf_ktime_to_ms(desc->timestamp));
  4371. }
  4372. dp_send_completion_to_pkt_capture(soc, desc, ts);
  4373. if (dp_tx_pkt_tracepoints_enabled())
  4374. qdf_trace_dp_packet(desc->nbuf, QDF_TX,
  4375. desc->msdu_ext_desc ?
  4376. desc->msdu_ext_desc->tso_desc : NULL,
  4377. qdf_ktime_to_ms(desc->timestamp));
  4378. if (!(desc->msdu_ext_desc)) {
  4379. dp_tx_enh_unmap(soc, desc);
  4380. if (txrx_peer)
  4381. peer_id = txrx_peer->peer_id;
  4382. if (QDF_STATUS_SUCCESS ==
  4383. dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
  4384. return;
  4385. }
  4386. if (QDF_STATUS_SUCCESS ==
  4387. dp_get_completion_indication_for_stack(soc,
  4388. desc->pdev,
  4389. txrx_peer, ts,
  4390. desc->nbuf,
  4391. time_latency)) {
  4392. dp_send_completion_to_stack(soc,
  4393. desc->pdev,
  4394. ts->peer_id,
  4395. ts->ppdu_id,
  4396. desc->nbuf);
  4397. return;
  4398. }
  4399. }
  4400. desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
  4401. dp_tx_comp_free_buf(soc, desc, false);
  4402. }
  4403. #ifdef DISABLE_DP_STATS
4404. /**
4405. * dp_tx_update_connectivity_stats() - update tx connectivity stats
4406. * @soc: core txrx main context
4407. * @vdev: datapath vdev handle
4408. * @tx_desc: tx desc
4409. * @status: tx status
4410. * Return: none
4411. */
  4412. static inline
  4413. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  4414. struct dp_vdev *vdev,
  4415. struct dp_tx_desc_s *tx_desc,
  4416. uint8_t status)
  4417. {
  4418. }
  4419. #else
  4420. static inline
  4421. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  4422. struct dp_vdev *vdev,
  4423. struct dp_tx_desc_s *tx_desc,
  4424. uint8_t status)
  4425. {
  4426. void *osif_dev;
  4427. ol_txrx_stats_rx_fp stats_cbk;
  4428. uint8_t pkt_type;
  4429. qdf_assert(tx_desc);
  4430. if (!vdev ||
  4431. !vdev->osif_vdev ||
  4432. !vdev->stats_cb)
  4433. return;
  4434. osif_dev = vdev->osif_vdev;
  4435. stats_cbk = vdev->stats_cb;
  4436. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
  4437. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  4438. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
  4439. &pkt_type);
  4440. }
  4441. #endif
  4442. #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
  4443. /* Mask for bit29 ~ bit31 */
  4444. #define DP_TX_TS_BIT29_31_MASK 0xE0000000
  4445. /* Timestamp value (unit us) if bit29 is set */
  4446. #define DP_TX_TS_BIT29_SET_VALUE BIT(29)
  4447. /**
  4448. * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
  4449. * @ack_ts: OTA ack timestamp, unit us.
  4450. * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
  4451. * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
  4452. *
4453. * This function restores the bit29 ~ bit31 value of the
4454. * buffer_timestamp in the wbm2sw ring entry. Currently buffer_timestamp
4455. * can only hold 0x7FFFF * 1024 us (29 bits); if the timestamp is >
4456. * 0x7FFFF * 1024 us, bit29 ~ bit31 will be lost.
  4457. *
  4458. * Return: the adjusted buffer_timestamp value
  4459. */
  4460. static inline
  4461. uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
  4462. uint32_t enqueue_ts,
  4463. uint32_t base_delta_ts)
  4464. {
  4465. uint32_t ack_buffer_ts;
  4466. uint32_t ack_buffer_ts_bit29_31;
  4467. uint32_t adjusted_enqueue_ts;
  4468. /* corresponding buffer_timestamp value when receive OTA Ack */
  4469. ack_buffer_ts = ack_ts - base_delta_ts;
  4470. ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;
  4471. /* restore the bit29 ~ bit31 value */
  4472. adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;
  4473. /*
4474. * If the actual enqueue_ts value occupies only 29 bits but enqueue_ts
4475. * plus the real UL delay overflows 29 bits, then the 30th bit (bit-29)
4476. * should not be marked, otherwise an extra 0x20000000 us is added to
4477. * enqueue_ts.
  4478. */
  4479. if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
  4480. adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;
  4481. return adjusted_enqueue_ts;
  4482. }
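/*
 * Illustrative sketch (not part of the driver), assuming base_delta_ts = 0:
 *
 * Normal case: ack_ts = 0x20000100, enqueue_ts = 0x00000080 (29-bit value
 * whose real bit29 was lost). adjusted = 0x20000000 | 0x00000080 =
 * 0x20000080 <= ack_buffer_ts, so bit29 is correctly restored.
 *
 * Rollover case: ack_ts = 0x20000100, enqueue_ts = 0x1FFFFF00 (enqueue
 * happened just before the 29-bit rollover). adjusted = 0x3FFFFF00 >
 * ack_buffer_ts, an impossible ordering, so 0x20000000 is subtracted and
 * the original 0x1FFFFF00 is returned.
 */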
  4483. QDF_STATUS
  4484. dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
  4485. uint32_t delta_tsf,
  4486. uint32_t *delay_us)
  4487. {
  4488. uint32_t buffer_ts;
  4489. uint32_t delay;
  4490. if (!delay_us)
  4491. return QDF_STATUS_E_INVAL;
4492. /* If tx_rate_stats_info_valid is 0, the tsf in ts is invalid */
  4493. if (!ts->valid)
  4494. return QDF_STATUS_E_INVAL;
  4495. /* buffer_timestamp is in units of 1024 us and is [31:13] of
  4496. * WBM_RELEASE_RING_4. After left shift 10 bits, it's
  4497. * valid up to 29 bits.
  4498. */
  4499. buffer_ts = ts->buffer_timestamp << 10;
  4500. buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
  4501. buffer_ts, delta_tsf);
  4502. delay = ts->tsf - buffer_ts - delta_tsf;
  4503. if (qdf_unlikely(delay & 0x80000000)) {
  4504. dp_err_rl("delay = 0x%x (-ve)\n"
  4505. "release_src = %d\n"
  4506. "ppdu_id = 0x%x\n"
  4507. "peer_id = 0x%x\n"
  4508. "tid = 0x%x\n"
  4509. "release_reason = %d\n"
  4510. "tsf = %u (0x%x)\n"
  4511. "buffer_timestamp = %u (0x%x)\n"
  4512. "delta_tsf = %u (0x%x)\n",
  4513. delay, ts->release_src, ts->ppdu_id, ts->peer_id,
  4514. ts->tid, ts->status, ts->tsf, ts->tsf,
  4515. ts->buffer_timestamp, ts->buffer_timestamp,
  4516. delta_tsf, delta_tsf);
  4517. delay = 0;
  4518. goto end;
  4519. }
  4520. delay &= 0x1FFFFFFF; /* mask 29 BITS */
  4521. if (delay > 0x1000000) {
  4522. dp_info_rl("----------------------\n"
  4523. "Tx completion status:\n"
  4524. "----------------------\n"
  4525. "release_src = %d\n"
  4526. "ppdu_id = 0x%x\n"
  4527. "release_reason = %d\n"
  4528. "tsf = %u (0x%x)\n"
  4529. "buffer_timestamp = %u (0x%x)\n"
  4530. "delta_tsf = %u (0x%x)\n",
  4531. ts->release_src, ts->ppdu_id, ts->status,
  4532. ts->tsf, ts->tsf, ts->buffer_timestamp,
  4533. ts->buffer_timestamp, delta_tsf, delta_tsf);
  4534. return QDF_STATUS_E_FAILURE;
  4535. }
  4536. end:
  4537. *delay_us = delay;
  4538. return QDF_STATUS_SUCCESS;
  4539. }
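/*
 * Illustrative sketch (not part of the driver), assuming example values
 * where bits 29-31 need no adjustment: buffer_timestamp = 100 (units of
 * 1024 us) gives buffer_ts = 100 << 10 = 102400 us. With ts->tsf = 110000
 * and delta_tsf = 1000,
 *   delay = 110000 - 102400 - 1000 = 6600 us.
 * A negative (bit31 set) result is logged and reported as 0; a delay above
 * 0x1000000 us is logged and treated as a failure.
 */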
  4540. void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4541. uint32_t delta_tsf)
  4542. {
  4543. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4544. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  4545. DP_MOD_ID_CDP);
  4546. if (!vdev) {
  4547. dp_err_rl("vdev %d does not exist", vdev_id);
  4548. return;
  4549. }
  4550. vdev->delta_tsf = delta_tsf;
  4551. dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
  4552. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4553. }
  4554. #endif
  4555. #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
  4556. QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
  4557. uint8_t vdev_id, bool enable)
  4558. {
  4559. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4560. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  4561. DP_MOD_ID_CDP);
  4562. if (!vdev) {
  4563. dp_err_rl("vdev %d does not exist", vdev_id);
  4564. return QDF_STATUS_E_FAILURE;
  4565. }
  4566. qdf_atomic_set(&vdev->ul_delay_report, enable);
  4567. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4568. return QDF_STATUS_SUCCESS;
  4569. }
  4570. QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4571. uint32_t *val)
  4572. {
  4573. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4574. struct dp_vdev *vdev;
  4575. uint32_t delay_accum;
  4576. uint32_t pkts_accum;
  4577. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
  4578. if (!vdev) {
  4579. dp_err_rl("vdev %d does not exist", vdev_id);
  4580. return QDF_STATUS_E_FAILURE;
  4581. }
  4582. if (!qdf_atomic_read(&vdev->ul_delay_report)) {
  4583. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4584. return QDF_STATUS_E_FAILURE;
  4585. }
  4586. /* Average uplink delay based on current accumulated values */
  4587. delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
  4588. pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
  4589. *val = delay_accum / pkts_accum;
  4590. dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
  4591. delay_accum, pkts_accum);
  4592. /* Reset accumulated values to 0 */
  4593. qdf_atomic_set(&vdev->ul_delay_accum, 0);
  4594. qdf_atomic_set(&vdev->ul_pkts_accum, 0);
  4595. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4596. return QDF_STATUS_SUCCESS;
  4597. }
  4598. static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  4599. struct hal_tx_completion_status *ts)
  4600. {
  4601. uint32_t ul_delay;
  4602. if (qdf_unlikely(!vdev)) {
  4603. dp_info_rl("vdev is null or delete in progress");
  4604. return;
  4605. }
  4606. if (!qdf_atomic_read(&vdev->ul_delay_report))
  4607. return;
  4608. if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
  4609. vdev->delta_tsf,
  4610. &ul_delay)))
  4611. return;
  4612. ul_delay /= 1000; /* in unit of ms */
  4613. qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
  4614. qdf_atomic_inc(&vdev->ul_pkts_accum);
  4615. }
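/*
 * Illustrative sketch (not part of the driver): three completions with HW
 * delays of 6000, 4000 and 8000 us accumulate ul_delay_accum = 6 + 4 + 8 =
 * 18 ms and ul_pkts_accum = 3. A subsequent dp_get_uplink_delay() call
 * reports 18 / 3 = 6 ms and resets both accumulators to 0.
 */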
  4616. #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
  4617. static inline
  4618. void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  4619. struct hal_tx_completion_status *ts)
  4620. {
  4621. }
  4622. #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
  4623. /**
  4624. * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
  4625. * @soc: DP soc handle
  4626. * @tx_desc: software descriptor head pointer
  4627. * @ts: Tx completion status
  4628. * @txrx_peer: txrx peer handle
  4629. * @ring_id: ring number
  4630. *
  4631. * Return: none
  4632. */
  4633. void dp_tx_comp_process_tx_status(struct dp_soc *soc,
  4634. struct dp_tx_desc_s *tx_desc,
  4635. struct hal_tx_completion_status *ts,
  4636. struct dp_txrx_peer *txrx_peer,
  4637. uint8_t ring_id)
  4638. {
  4639. uint32_t length;
  4640. qdf_ether_header_t *eh;
  4641. struct dp_vdev *vdev = NULL;
  4642. qdf_nbuf_t nbuf = tx_desc->nbuf;
  4643. enum qdf_dp_tx_rx_status dp_status;
  4644. if (!nbuf) {
  4645. dp_info_rl("invalid tx descriptor. nbuf NULL");
  4646. goto out;
  4647. }
  4648. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  4649. length = qdf_nbuf_len(nbuf);
  4650. dp_status = dp_tx_hw_to_qdf(ts->status);
  4651. DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
  4652. QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
  4653. QDF_TRACE_DEFAULT_PDEV_ID,
  4654. qdf_nbuf_data_addr(nbuf),
  4655. sizeof(qdf_nbuf_data(nbuf)),
  4656. tx_desc->id, ts->status, dp_status));
  4657. dp_tx_comp_debug("-------------------- \n"
  4658. "Tx Completion Stats: \n"
  4659. "-------------------- \n"
  4660. "ack_frame_rssi = %d \n"
  4661. "first_msdu = %d \n"
  4662. "last_msdu = %d \n"
  4663. "msdu_part_of_amsdu = %d \n"
  4664. "rate_stats valid = %d \n"
  4665. "bw = %d \n"
  4666. "pkt_type = %d \n"
  4667. "stbc = %d \n"
  4668. "ldpc = %d \n"
  4669. "sgi = %d \n"
  4670. "mcs = %d \n"
  4671. "ofdma = %d \n"
  4672. "tones_in_ru = %d \n"
  4673. "tsf = %d \n"
  4674. "ppdu_id = %d \n"
  4675. "transmit_cnt = %d \n"
  4676. "tid = %d \n"
  4677. "peer_id = %d\n"
  4678. "tx_status = %d\n",
  4679. ts->ack_frame_rssi, ts->first_msdu,
  4680. ts->last_msdu, ts->msdu_part_of_amsdu,
  4681. ts->valid, ts->bw, ts->pkt_type, ts->stbc,
  4682. ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
  4683. ts->tones_in_ru, ts->tsf, ts->ppdu_id,
  4684. ts->transmit_cnt, ts->tid, ts->peer_id,
  4685. ts->status);
  4686. /* Update SoC level stats */
  4687. DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
  4688. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  4689. if (!txrx_peer) {
  4690. dp_info_rl("peer is null or deletion in progress");
  4691. DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
  4692. goto out;
  4693. }
  4694. vdev = txrx_peer->vdev;
  4695. dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
  4696. dp_tx_update_uplink_delay(soc, vdev, ts);
  4697. /* check tx complete notification */
  4698. if (qdf_nbuf_tx_notify_comp_get(nbuf))
  4699. dp_tx_notify_completion(soc, vdev, tx_desc,
  4700. nbuf, ts->status);
  4701. /* Update per-packet stats for mesh mode */
  4702. if (qdf_unlikely(vdev->mesh_vdev) &&
  4703. !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
  4704. dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
  4705. /* Update peer level stats */
  4706. if (qdf_unlikely(txrx_peer->bss_peer &&
  4707. vdev->opmode == wlan_op_mode_ap)) {
  4708. if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
  4709. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
  4710. length);
  4711. if (txrx_peer->vdev->tx_encap_type ==
  4712. htt_cmn_pkt_type_ethernet &&
  4713. QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  4714. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  4715. tx.bcast, 1,
  4716. length);
  4717. }
  4718. }
  4719. } else {
  4720. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
  4721. if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
  4722. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
  4723. 1, length);
  4724. if (qdf_unlikely(txrx_peer->in_twt)) {
  4725. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  4726. tx.tx_success_twt,
  4727. 1, length);
  4728. }
  4729. }
  4730. }
  4731. dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
  4732. dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
  4733. dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
  4734. dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
  4735. ts, ts->tid);
  4736. dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
  4737. #ifdef QCA_SUPPORT_RDK_STATS
  4738. if (soc->peerstats_enabled)
  4739. dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
  4740. qdf_ktime_to_ms(tx_desc->timestamp),
  4741. ts->ppdu_id);
  4742. #endif
  4743. out:
  4744. return;
  4745. }
  4746. #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
  4747. defined(QCA_ENHANCED_STATS_SUPPORT)
4748. /**
4749. * dp_tx_update_peer_basic_stats() - Update peer basic stats
  4750. * @txrx_peer: Datapath txrx_peer handle
  4751. * @length: Length of the packet
  4752. * @tx_status: Tx status from TQM/FW
  4753. * @update: enhanced flag value present in dp_pdev
  4754. *
  4755. * Return: none
  4756. */
  4757. void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
  4758. uint32_t length, uint8_t tx_status,
  4759. bool update)
  4760. {
  4761. if (update || (!txrx_peer->hw_txrx_stats_en)) {
  4762. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  4763. if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
  4764. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  4765. }
  4766. }
  4767. #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
  4768. void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
  4769. uint32_t length, uint8_t tx_status,
  4770. bool update)
  4771. {
  4772. if (!txrx_peer->hw_txrx_stats_en) {
  4773. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  4774. if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
  4775. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  4776. }
  4777. }
  4778. #else
  4779. void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
  4780. uint32_t length, uint8_t tx_status,
  4781. bool update)
  4782. {
  4783. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  4784. if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
  4785. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  4786. }
  4787. #endif
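/*
 * Illustrative sketch (not part of the driver): how the three build
 * variants above gate the flat peer counters.
 * - HW vdev-stats offload + enhanced stats: update when the caller passes
 *   update = true or when hw_txrx_stats_en is not set for the peer.
 * - HW vdev-stats offload only: update only when hw_txrx_stats_en is not
 *   set for the peer.
 * - Neither: always update comp_pkt, and tx_failed for non-acked frames.
 */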
4788. /**
4789. * dp_tx_prefetch_next_nbuf_data() - Prefetch the next descriptor's nbuf and data
4790. * @next: pointer to the next Tx descriptor
  4791. *
  4792. * Return: none
  4793. */
  4794. #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
  4795. static inline
  4796. void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
  4797. {
  4798. qdf_nbuf_t nbuf = NULL;
  4799. if (next)
  4800. nbuf = next->nbuf;
  4801. if (nbuf)
  4802. qdf_prefetch(nbuf);
  4803. }
  4804. #else
  4805. static inline
  4806. void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
  4807. {
  4808. }
  4809. #endif
  4810. /**
  4811. * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
  4812. * @soc: core txrx main context
  4813. * @desc: software descriptor
  4814. *
  4815. * Return: true when packet is reinjected
  4816. */
  4817. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
  4818. defined(WLAN_MCAST_MLO)
  4819. static inline bool
  4820. dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  4821. {
  4822. struct dp_vdev *vdev = NULL;
  4823. if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
  4824. if (!soc->arch_ops.dp_tx_mcast_handler ||
  4825. !soc->arch_ops.dp_tx_is_mcast_primary)
  4826. return false;
  4827. vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
  4828. DP_MOD_ID_REINJECT);
  4829. if (qdf_unlikely(!vdev)) {
  4830. dp_tx_comp_info_rl("Unable to get vdev ref %d",
  4831. desc->id);
  4832. return false;
  4833. }
  4834. if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
  4835. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
  4836. return false;
  4837. }
  4838. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  4839. qdf_nbuf_len(desc->nbuf));
  4840. soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
  4841. dp_tx_desc_release(desc, desc->pool_id);
  4842. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
  4843. return true;
  4844. }
  4845. return false;
  4846. }
  4847. #else
  4848. static inline bool
  4849. dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  4850. {
  4851. return false;
  4852. }
  4853. #endif
  4854. #ifdef QCA_DP_TX_NBUF_LIST_FREE
  4855. static inline void
  4856. dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
  4857. {
  4858. qdf_nbuf_queue_head_init(nbuf_queue_head);
  4859. }
  4860. static inline void
  4861. dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
  4862. struct dp_tx_desc_s *desc)
  4863. {
  4864. qdf_nbuf_t nbuf = NULL;
  4865. nbuf = desc->nbuf;
  4866. if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
  4867. qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
  4868. else
  4869. qdf_nbuf_free(nbuf);
  4870. }
  4871. static inline void
  4872. dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
  4873. {
  4874. qdf_nbuf_dev_kfree_list(nbuf_queue_head);
  4875. }
  4876. #else
  4877. static inline void
  4878. dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
  4879. {
  4880. }
  4881. static inline void
  4882. dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
  4883. struct dp_tx_desc_s *desc)
  4884. {
  4885. qdf_nbuf_free(desc->nbuf);
  4886. }
  4887. static inline void
  4888. dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
  4889. {
  4890. }
  4891. #endif
  4892. /**
  4893. * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
  4894. * @soc: core txrx main context
  4895. * @comp_head: software descriptor head pointer
  4896. * @ring_id: ring number
  4897. *
  4898. * This function will process batch of descriptors reaped by dp_tx_comp_handler
  4899. * and release the software descriptors after processing is complete
  4900. *
  4901. * Return: none
  4902. */
  4903. void
  4904. dp_tx_comp_process_desc_list(struct dp_soc *soc,
  4905. struct dp_tx_desc_s *comp_head, uint8_t ring_id)
  4906. {
  4907. struct dp_tx_desc_s *desc;
  4908. struct dp_tx_desc_s *next;
  4909. struct hal_tx_completion_status ts;
  4910. struct dp_txrx_peer *txrx_peer = NULL;
  4911. uint16_t peer_id = DP_INVALID_PEER;
  4912. dp_txrx_ref_handle txrx_ref_handle = NULL;
  4913. qdf_nbuf_queue_head_t h;
  4914. desc = comp_head;
  4915. dp_tx_nbuf_queue_head_init(&h);
  4916. while (desc) {
  4917. next = desc->next;
  4918. dp_tx_prefetch_next_nbuf_data(next);
  4919. if (peer_id != desc->peer_id) {
  4920. if (txrx_peer)
  4921. dp_txrx_peer_unref_delete(txrx_ref_handle,
  4922. DP_MOD_ID_TX_COMP);
  4923. peer_id = desc->peer_id;
  4924. txrx_peer =
  4925. dp_txrx_peer_get_ref_by_id(soc, peer_id,
  4926. &txrx_ref_handle,
  4927. DP_MOD_ID_TX_COMP);
  4928. }
  4929. if (dp_tx_mcast_reinject_handler(soc, desc)) {
  4930. desc = next;
  4931. continue;
  4932. }
  4933. if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
  4934. if (qdf_likely(txrx_peer))
  4935. dp_tx_update_peer_basic_stats(txrx_peer,
  4936. desc->length,
  4937. desc->tx_status,
  4938. false);
  4939. dp_tx_nbuf_dev_queue_free(&h, desc);
  4940. dp_ppeds_tx_desc_free(soc, desc);
  4941. desc = next;
  4942. continue;
  4943. }
  4944. if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
  4945. struct dp_pdev *pdev = desc->pdev;
  4946. if (qdf_likely(txrx_peer))
  4947. dp_tx_update_peer_basic_stats(txrx_peer,
  4948. desc->length,
  4949. desc->tx_status,
  4950. false);
  4951. qdf_assert(pdev);
  4952. dp_tx_outstanding_dec(pdev);
  4953. /*
4954. * Calling a QDF wrapper here creates a significant
4955. * performance impact, so the wrapper call is avoided here
  4956. */
  4957. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  4958. desc->id, DP_TX_COMP_UNMAP);
  4959. dp_tx_nbuf_unmap(soc, desc);
  4960. dp_tx_nbuf_dev_queue_free(&h, desc);
  4961. dp_tx_desc_free(soc, desc, desc->pool_id);
  4962. desc = next;
  4963. continue;
  4964. }
  4965. hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
  4966. dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
  4967. ring_id);
  4968. dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
  4969. dp_tx_desc_release(desc, desc->pool_id);
  4970. desc = next;
  4971. }
  4972. dp_tx_nbuf_dev_kfree_list(&h);
  4973. if (txrx_peer)
  4974. dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
  4975. }
  4976. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  4977. static inline
  4978. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  4979. int max_reap_limit)
  4980. {
  4981. bool limit_hit = false;
  4982. limit_hit =
  4983. (num_reaped >= max_reap_limit) ? true : false;
  4984. if (limit_hit)
  4985. DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
  4986. return limit_hit;
  4987. }
  4988. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  4989. {
  4990. return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
  4991. }
  4992. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  4993. {
  4994. struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
  4995. return cfg->tx_comp_loop_pkt_limit;
  4996. }
  4997. #else
  4998. static inline
  4999. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  5000. int max_reap_limit)
  5001. {
  5002. return false;
  5003. }
  5004. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  5005. {
  5006. return false;
  5007. }
  5008. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  5009. {
  5010. return 0;
  5011. }
  5012. #endif
  5013. #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
  5014. static inline int
  5015. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  5016. int *max_reap_limit)
  5017. {
  5018. return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
  5019. max_reap_limit);
  5020. }
  5021. #else
  5022. static inline int
  5023. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  5024. int *max_reap_limit)
  5025. {
  5026. return 0;
  5027. }
  5028. #endif
  5029. #ifdef DP_TX_TRACKING
  5030. void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
  5031. {
  5032. if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
  5033. (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
  5034. dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
  5035. qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
  5036. }
  5037. }
  5038. #endif
  5039. uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
  5040. hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
  5041. uint32_t quota)
  5042. {
  5043. void *tx_comp_hal_desc;
  5044. void *last_prefetched_hw_desc = NULL;
  5045. struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
  5046. hal_soc_handle_t hal_soc;
  5047. uint8_t buffer_src;
  5048. struct dp_tx_desc_s *tx_desc = NULL;
  5049. struct dp_tx_desc_s *head_desc = NULL;
  5050. struct dp_tx_desc_s *tail_desc = NULL;
  5051. uint32_t num_processed = 0;
  5052. uint32_t count;
  5053. uint32_t num_avail_for_reap = 0;
  5054. bool force_break = false;
  5055. struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
  5056. int max_reap_limit, ring_near_full;
  5057. uint32_t num_entries;
  5058. DP_HIST_INIT();
  5059. num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);
  5060. more_data:
  5061. hal_soc = soc->hal_soc;
  5062. /* Re-initialize local variables to be re-used */
  5063. head_desc = NULL;
  5064. tail_desc = NULL;
  5065. count = 0;
  5066. max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
  5067. ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
  5068. &max_reap_limit);
  5069. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  5070. dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
  5071. return 0;
  5072. }
  5073. if (!num_avail_for_reap)
  5074. num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
  5075. hal_ring_hdl, 0);
  5076. if (num_avail_for_reap >= quota)
  5077. num_avail_for_reap = quota;
  5078. dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
  5079. last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
  5080. hal_ring_hdl,
  5081. num_avail_for_reap);
  5082. /* Find head descriptor from completion ring */
  5083. while (qdf_likely(num_avail_for_reap--)) {
  5084. tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
  5085. if (qdf_unlikely(!tx_comp_hal_desc))
  5086. break;
  5087. buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
  5088. tx_comp_hal_desc);
5089. /* If this buffer was not released by TQM or FW, then it is not a
5090. * Tx completion indication; log the error and skip it */
  5091. if (qdf_unlikely(buffer_src !=
  5092. HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
  5093. (qdf_unlikely(buffer_src !=
  5094. HAL_TX_COMP_RELEASE_SOURCE_FW))) {
  5095. uint8_t wbm_internal_error;
  5096. dp_err_rl(
  5097. "Tx comp release_src != TQM | FW but from %d",
  5098. buffer_src);
  5099. hal_dump_comp_desc(tx_comp_hal_desc);
  5100. DP_STATS_INC(soc, tx.invalid_release_source, 1);
  5101. /* When WBM sees NULL buffer_addr_info in any of
  5102. * ingress rings it sends an error indication,
  5103. * with wbm_internal_error=1, to a specific ring.
  5104. * The WBM2SW ring used to indicate these errors is
  5105. * fixed in HW, and that ring is being used as Tx
  5106. * completion ring. These errors are not related to
  5107. * Tx completions, and should just be ignored
  5108. */
  5109. wbm_internal_error = hal_get_wbm_internal_error(
  5110. hal_soc,
  5111. tx_comp_hal_desc);
  5112. if (wbm_internal_error) {
  5113. dp_err_rl("Tx comp wbm_internal_error!!");
  5114. DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
  5115. if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
  5116. buffer_src)
  5117. dp_handle_wbm_internal_error(
  5118. soc,
  5119. tx_comp_hal_desc,
  5120. hal_tx_comp_get_buffer_type(
  5121. tx_comp_hal_desc));
  5122. } else {
  5123. dp_err_rl("Tx comp wbm_internal_error false");
  5124. DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
  5125. }
  5126. continue;
  5127. }
  5128. soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
  5129. tx_comp_hal_desc,
  5130. &tx_desc);
  5131. if (qdf_unlikely(!tx_desc)) {
  5132. dp_err("unable to retrieve tx_desc!");
  5133. hal_dump_comp_desc(tx_comp_hal_desc);
  5134. DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
  5135. QDF_BUG(0);
  5136. continue;
  5137. }
  5138. tx_desc->buffer_src = buffer_src;
  5139. if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
  5140. goto add_to_pool2;
  5141. /*
  5142. * If the release source is FW, process the HTT status
  5143. */
  5144. if (qdf_unlikely(buffer_src ==
  5145. HAL_TX_COMP_RELEASE_SOURCE_FW)) {
  5146. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  5147. hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
  5148. htt_tx_status);
  5149. /* Collect hw completion contents */
  5150. hal_tx_comp_desc_sync(tx_comp_hal_desc,
  5151. &tx_desc->comp, 1);
  5152. soc->arch_ops.dp_tx_process_htt_completion(
  5153. soc,
  5154. tx_desc,
  5155. htt_tx_status,
  5156. ring_id);
  5157. } else {
  5158. tx_desc->tx_status =
  5159. hal_tx_comp_get_tx_status(tx_comp_hal_desc);
  5160. tx_desc->buffer_src = buffer_src;
  5161. /*
5162. * If the fast completion mode is enabled, extended
5163. * metadata is not copied from the descriptor
  5164. */
  5165. if (qdf_likely(tx_desc->flags &
  5166. DP_TX_DESC_FLAG_SIMPLE))
  5167. goto add_to_pool;
  5168. /*
  5169. * If the descriptor is already freed in vdev_detach,
  5170. * continue to next descriptor
  5171. */
  5172. if (qdf_unlikely
  5173. ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
  5174. !tx_desc->flags)) {
  5175. dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
  5176. tx_desc->id);
  5177. DP_STATS_INC(soc, tx.tx_comp_exception, 1);
  5178. dp_tx_desc_check_corruption(tx_desc);
  5179. continue;
  5180. }
  5181. if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
  5182. dp_tx_comp_info_rl("pdev in down state %d",
  5183. tx_desc->id);
  5184. tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
  5185. dp_tx_comp_free_buf(soc, tx_desc, false);
  5186. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  5187. goto next_desc;
  5188. }
  5189. if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
  5190. !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
  5191. dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
  5192. tx_desc->flags, tx_desc->id);
  5193. qdf_assert_always(0);
  5194. }
  5195. /* Collect hw completion contents */
  5196. hal_tx_comp_desc_sync(tx_comp_hal_desc,
  5197. &tx_desc->comp, 1);
  5198. add_to_pool:
  5199. DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
  5200. add_to_pool2:
  5201. /* First ring descriptor on the cycle */
  5202. if (!head_desc) {
  5203. head_desc = tx_desc;
  5204. tail_desc = tx_desc;
  5205. }
  5206. tail_desc->next = tx_desc;
  5207. tx_desc->next = NULL;
  5208. tail_desc = tx_desc;
  5209. }
  5210. next_desc:
  5211. num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
  5212. /*
5213. * Stop processing once the processed packet count
5214. * exceeds the given quota
  5215. */
  5216. count++;
  5217. dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
  5218. num_avail_for_reap,
  5219. hal_ring_hdl,
  5220. &last_prefetched_hw_desc,
  5221. &last_prefetched_sw_desc);
  5222. if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
  5223. break;
  5224. }
  5225. dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
  5226. /* Process the reaped descriptors */
  5227. if (head_desc)
  5228. dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
  5229. DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
  5230. /*
5231. * If we are processing in near-full condition, there are 3 scenarios:
5232. * 1) Ring entries have reached critical state
  5233. * 2) Ring entries are still near high threshold
  5234. * 3) Ring entries are below the safe level
  5235. *
  5236. * One more loop will move the state to normal processing and yield
  5237. */
  5238. if (ring_near_full)
  5239. goto more_data;
  5240. if (dp_tx_comp_enable_eol_data_check(soc)) {
  5241. if (num_processed >= quota)
  5242. force_break = true;
  5243. if (!force_break &&
  5244. hal_srng_dst_peek_sync_locked(soc->hal_soc,
  5245. hal_ring_hdl)) {
  5246. DP_STATS_INC(soc, tx.hp_oos2, 1);
  5247. if (!hif_exec_should_yield(soc->hif_handle,
  5248. int_ctx->dp_intr_id))
  5249. goto more_data;
  5250. num_avail_for_reap =
  5251. hal_srng_dst_num_valid_locked(soc->hal_soc,
  5252. hal_ring_hdl,
  5253. true);
  5254. if (qdf_unlikely(num_entries &&
  5255. (num_avail_for_reap >=
  5256. num_entries >> 1))) {
  5257. DP_STATS_INC(soc, tx.near_full, 1);
  5258. goto more_data;
  5259. }
  5260. }
  5261. }
  5262. DP_TX_HIST_STATS_PER_PDEV();
  5263. return num_processed;
  5264. }
  5265. #ifdef FEATURE_WLAN_TDLS
  5266. qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  5267. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
  5268. {
  5269. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  5270. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  5271. DP_MOD_ID_TDLS);
  5272. if (!vdev) {
  5273. dp_err("vdev handle for id %d is NULL", vdev_id);
  5274. return NULL;
  5275. }
  5276. if (tx_spec & OL_TX_SPEC_NO_FREE)
  5277. vdev->is_tdls_frame = true;
  5278. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  5279. return dp_tx_send(soc_hdl, vdev_id, msdu_list);
  5280. }
  5281. #endif
  5282. /**
  5283. * dp_tx_vdev_attach() - attach vdev to dp tx
  5284. * @vdev: virtual device instance
  5285. *
  5286. * Return: QDF_STATUS_SUCCESS: success
  5287. * QDF_STATUS_E_RESOURCES: Error return
  5288. */
  5289. QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
  5290. {
  5291. int pdev_id;
  5292. /*
  5293. * Fill HTT TCL Metadata with Vdev ID and MAC ID
  5294. */
  5295. DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
  5296. DP_TCL_METADATA_TYPE_VDEV_BASED);
  5297. DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
  5298. vdev->vdev_id);
  5299. pdev_id =
  5300. dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
  5301. vdev->pdev->pdev_id);
  5302. DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
  5303. /*
  5304. * Set HTT Extension Valid bit to 0 by default
  5305. */
  5306. DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
  5307. dp_tx_vdev_update_search_flags(vdev);
  5308. return QDF_STATUS_SUCCESS;
  5309. }
  5310. #ifndef FEATURE_WDS
  5311. static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
  5312. {
  5313. return false;
  5314. }
  5315. #endif
  5316. /**
  5317. * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
  5318. * @vdev: virtual device instance
  5319. *
  5320. * Return: void
  5321. *
  5322. */
  5323. void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
  5324. {
  5325. struct dp_soc *soc = vdev->pdev->soc;
  5326. /*
5327. * Enable both AddrY (SA based search) and AddrX (DA based search)
  5328. * for TDLS link
  5329. *
  5330. * Enable AddrY (SA based search) only for non-WDS STA and
  5331. * ProxySTA VAP (in HKv1) modes.
  5332. *
  5333. * In all other VAP modes, only DA based search should be
  5334. * enabled
  5335. */
  5336. if (vdev->opmode == wlan_op_mode_sta &&
  5337. vdev->tdls_link_connected)
  5338. vdev->hal_desc_addr_search_flags =
  5339. (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
  5340. else if ((vdev->opmode == wlan_op_mode_sta) &&
  5341. !dp_tx_da_search_override(vdev))
  5342. vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
  5343. else
  5344. vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
  5345. if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
  5346. vdev->search_type = soc->sta_mode_search_policy;
  5347. else
  5348. vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
  5349. }
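/*
 * Illustrative summary (not part of the driver) of the selection above:
 * - STA with a connected TDLS link: ADDRX_EN | ADDRY_EN.
 * - STA (non-TDLS) without DA search override: ADDRY_EN.
 * - All other cases (non-STA, or STA with DA search override): ADDRX_EN.
 * search_type is soc->sta_mode_search_policy for non-TDLS STA vdevs and
 * HAL_TX_ADDR_SEARCH_DEFAULT otherwise.
 */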
  5350. static inline bool
  5351. dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
  5352. struct dp_vdev *vdev,
  5353. struct dp_tx_desc_s *tx_desc)
  5354. {
  5355. if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
  5356. return false;
  5357. /*
5358. * If vdev is given, then only check whether the desc
5359. * vdev matches. If vdev is NULL, then check whether the
5360. * desc pdev matches.
  5361. */
  5362. return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
  5363. (tx_desc->pdev == pdev);
  5364. }
  5365. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  5366. /**
5367. * dp_tx_desc_flush() - release resources associated
5368. * with a TX Desc
  5369. *
5370. * @pdev: Handle to DP pdev structure
  5371. * @vdev: virtual device instance
5372. * NULL: no specific Vdev is required; check all allocated TX desc
5373. * on this pdev.
5374. * Non-NULL: only check the allocated TX Desc associated with this Vdev.
  5375. *
  5376. * @force_free:
  5377. * true: flush the TX desc.
5378. * false: only reset the Vdev in each allocated TX desc
5379. * that is associated with the current Vdev.
  5380. *
  5381. * This function will go through the TX desc pool to flush
  5382. * the outstanding TX data or reset Vdev to NULL in associated TX
  5383. * Desc.
  5384. */
  5385. void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
  5386. bool force_free)
  5387. {
  5388. uint8_t i;
  5389. uint32_t j;
  5390. uint32_t num_desc, page_id, offset;
  5391. uint16_t num_desc_per_page;
  5392. struct dp_soc *soc = pdev->soc;
  5393. struct dp_tx_desc_s *tx_desc = NULL;
  5394. struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
  5395. if (!vdev && !force_free) {
  5396. dp_err("Reset TX desc vdev, Vdev param is required!");
  5397. return;
  5398. }
  5399. for (i = 0; i < MAX_TXDESC_POOLS; i++) {
  5400. tx_desc_pool = &soc->tx_desc[i];
  5401. if (!(tx_desc_pool->pool_size) ||
  5402. IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
  5403. !(tx_desc_pool->desc_pages.cacheable_pages))
  5404. continue;
  5405. /*
5406. * Take the flow pool lock in case the pool is freed because
5407. * all tx_desc are recycled while handling TX completion.
5408. * This is not necessary for a force flush because:
5409. * a. a double lock would occur if dp_tx_desc_release is
5410. * also trying to acquire it.
5411. * b. dp interrupts have been disabled before the force TX desc
5412. * flush in dp_pdev_deinit().
  5413. */
  5414. if (!force_free)
  5415. qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
  5416. num_desc = tx_desc_pool->pool_size;
  5417. num_desc_per_page =
  5418. tx_desc_pool->desc_pages.num_element_per_page;
  5419. for (j = 0; j < num_desc; j++) {
  5420. page_id = j / num_desc_per_page;
  5421. offset = j % num_desc_per_page;
  5422. if (qdf_unlikely(!(tx_desc_pool->
  5423. desc_pages.cacheable_pages)))
  5424. break;
  5425. tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
  5426. if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
  5427. /*
  5428. * Free TX desc if force free is
  5429. * required, otherwise only reset vdev
  5430. * in this TX desc.
  5431. */
  5432. if (force_free) {
  5433. tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
  5434. dp_tx_comp_free_buf(soc, tx_desc,
  5435. false);
  5436. dp_tx_desc_release(tx_desc, i);
  5437. } else {
  5438. tx_desc->vdev_id = DP_INVALID_VDEV_ID;
  5439. }
  5440. }
  5441. }
  5442. if (!force_free)
  5443. qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
  5444. }
  5445. }
  5446. #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
  5447. /**
  5448. * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
  5449. *
  5450. * @soc: Handle to DP soc structure
  5451. * @tx_desc: pointer of one TX desc
  5452. * @desc_pool_id: TX Desc pool id
  5453. */
  5454. static inline void
  5455. dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
  5456. uint8_t desc_pool_id)
  5457. {
  5458. TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
  5459. tx_desc->vdev_id = DP_INVALID_VDEV_ID;
  5460. TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
  5461. }
  5462. void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
  5463. bool force_free)
  5464. {
  5465. uint8_t i, num_pool;
  5466. uint32_t j;
  5467. uint32_t num_desc, page_id, offset;
  5468. uint16_t num_desc_per_page;
  5469. struct dp_soc *soc = pdev->soc;
  5470. struct dp_tx_desc_s *tx_desc = NULL;
  5471. struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
  5472. if (!vdev && !force_free) {
  5473. dp_err("Reset TX desc vdev, Vdev param is required!");
  5474. return;
  5475. }
  5476. num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  5477. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5478. for (i = 0; i < num_pool; i++) {
  5479. tx_desc_pool = &soc->tx_desc[i];
  5480. if (!tx_desc_pool->desc_pages.cacheable_pages)
  5481. continue;
  5482. num_desc_per_page =
  5483. tx_desc_pool->desc_pages.num_element_per_page;
  5484. for (j = 0; j < num_desc; j++) {
  5485. page_id = j / num_desc_per_page;
  5486. offset = j % num_desc_per_page;
  5487. tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
  5488. if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
  5489. if (force_free) {
  5490. tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
  5491. dp_tx_comp_free_buf(soc, tx_desc,
  5492. false);
  5493. dp_tx_desc_release(tx_desc, i);
  5494. } else {
  5495. dp_tx_desc_reset_vdev(soc, tx_desc,
  5496. i);
  5497. }
  5498. }
  5499. }
  5500. }
  5501. }
  5502. #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
  5503. /**
  5504. * dp_tx_vdev_detach() - detach vdev from dp tx
  5505. * @vdev: virtual device instance
  5506. *
  5507. * Return: QDF_STATUS_SUCCESS: success
  5508. * QDF_STATUS_E_RESOURCES: Error return
  5509. */
  5510. QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
  5511. {
  5512. struct dp_pdev *pdev = vdev->pdev;
  5513. /* Reset TX desc associated to this Vdev as NULL */
  5514. dp_tx_desc_flush(pdev, vdev, false);
  5515. return QDF_STATUS_SUCCESS;
  5516. }
  5517. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  5518. /* Pools will be allocated dynamically */
  5519. static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
  5520. int num_desc)
  5521. {
  5522. uint8_t i;
  5523. for (i = 0; i < num_pool; i++) {
  5524. qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
  5525. soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
  5526. }
  5527. return QDF_STATUS_SUCCESS;
  5528. }
  5529. static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
  5530. uint32_t num_desc)
  5531. {
  5532. return QDF_STATUS_SUCCESS;
  5533. }
  5534. static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
  5535. {
  5536. }
  5537. static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
  5538. {
  5539. uint8_t i;
  5540. for (i = 0; i < num_pool; i++)
  5541. qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
  5542. }
  5543. #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   uint32_t num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}

	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
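
	/*
	 * Teardown runs in the reverse order of allocation: TSO pools first,
	 * then the extension descriptor pools, and finally the regular
	 * static pools.
	 */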
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor pools
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;
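
	/*
	 * TSO pools may be attached lazily: when the config defers TSO desc
	 * attach, allocation and init of the TSO pools are left to the
	 * deferred path (see dp_tso_soc_attach() below) instead of being
	 * done here.
	 */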
	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}

/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
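
	/*
	 * Init mirrors the allocation order (static pools, extension pools,
	 * then TSO pools); on failure, the error labels unwind only the
	 * stages that were already initialized, in reverse order.
	 */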
	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}

/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
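
	/*
	 * Deferred counterpart of the TSO setup in the sw-pools alloc/init
	 * path above: both the TSO descriptor pools and the num-seg pools
	 * are allocated and initialized here in one shot.
	 */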
	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
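/*
 * Stamp a TX packet with a TSF-converted timestamp: the host time passed in
 * is translated through the vdev's get_tsf_time callback before being added
 * to the nbuf, and nothing is recorded if packet timestamping is disabled or
 * no callback is registered.
 */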
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
		uint64_t tsf_time;

		if (vdev->get_tsf_time) {
			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
		}
	}
}

void dp_pkt_get_timestamp(uint64_t *time)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
		*time = qdf_get_log_timestamp();
}
#endif