dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "cdp_txrx_cmn_reg.h"
#ifdef CONFIG_SAWF
#include <dp_sawf.h>
#endif
/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0
/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0
#define DP_RETRY_COUNT 7
#ifdef WLAN_PEER_JITTER
#define DP_AVG_JITTER_WEIGHT_DENOM 4
#define DP_AVG_DELAY_WEIGHT_DENOM 3
#endif
#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)\
	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif
#define DP_GET_HW_LINK_ID_FRM_PPDU_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))
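/*
 * Illustrative usage sketch of the macro above (the offset/width values
 * here are hypothetical, not taken from this file): with a 3-bit link id
 * stored at bit offset 8 of the PPDU id,
 *
 *	uint8_t hw_link_id = DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, 8, 3);
 *
 * evaluates to ((ppdu_id >> 8) & 0x7).
 */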
/* mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					  HAL_TX_ENCRYPT_TYPE_WEP_128,
					  HAL_TX_ENCRYPT_TYPE_WEP_104,
					  HAL_TX_ENCRYPT_TYPE_WEP_40,
					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					  HAL_TX_ENCRYPT_TYPE_WAPI,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
qdf_export_symbol(sec_type_map);
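/*
 * Usage sketch for the table above, assuming the cdp_sec_type enum is
 * ordered to match this table (e.g. cdp_sec_type_aes_ccmp at index 6):
 *
 *	uint8_t hal_cipher = sec_type_map[cdp_sec_type_aes_ccmp];
 *
 * would yield HAL_TX_ENCRYPT_TYPE_AES_CCMP_128 for programming into the
 * TCL descriptor.
 */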
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	enum dp_tx_event_type type;
	if (flags & DP_TX_DESC_FLAG_FLUSH)
		type = DP_TX_DESC_FLUSH;
	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
		type = DP_TX_COMP_UNMAP_ERR;
	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
		type = DP_TX_COMP_UNMAP;
	else
		type = DP_TX_DESC_UNMAP;
	return type;
}
static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
	struct dp_tx_desc_event *entry;
	uint32_t idx;
	uint16_t slot;
	switch (type) {
	case DP_TX_COMP_UNMAP:
	case DP_TX_COMP_UNMAP_ERR:
	case DP_TX_COMP_MSDU_EXT:
		if (qdf_unlikely(!tx_comp_history->allocated))
			return;
		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
						 &slot,
						 DP_TX_COMP_HIST_SLOT_SHIFT,
						 DP_TX_COMP_HIST_PER_SLOT_MAX,
						 DP_TX_COMP_HISTORY_SIZE);
		entry = &tx_comp_history->entry[slot][idx];
		break;
	case DP_TX_DESC_MAP:
	case DP_TX_DESC_UNMAP:
	case DP_TX_DESC_COOKIE:
	case DP_TX_DESC_FLUSH:
		if (qdf_unlikely(!tx_tcl_history->allocated))
			return;
		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
						 &slot,
						 DP_TX_TCL_HIST_SLOT_SHIFT,
						 DP_TX_TCL_HIST_PER_SLOT_MAX,
						 DP_TX_TCL_HISTORY_SIZE);
		entry = &tx_tcl_history->entry[slot][idx];
		break;
	default:
		dp_info_rl("Invalid dp_tx_event_type: %d", type);
		return;
	}
	entry->skb = skb;
	entry->paddr = paddr;
	entry->sw_cookie = sw_cookie;
	entry->type = type;
	entry->ts = qdf_get_log_timestamp();
}
static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
	int i;
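	/* frags 1..num_frags-1 are recorded with the caller's cookie in the
	 * loop below; frag 0 is recorded separately, and only for the last
	 * segment in the list, with a 0xFFFFFFFF sentinel cookie.
	 */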
	for (i = 1; i < tso_seg->seg.num_frags; i++) {
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
				       skb, sw_cookie, type);
	}
	if (!tso_seg->next)
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
				       skb, 0xFFFFFFFF, type);
}
static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
	uint32_t num_segs = tso_info.num_segs;
	while (num_segs) {
		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
		curr_seg = curr_seg->next;
		num_segs--;
	}
}
#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	return DP_TX_DESC_INVAL_EVT;
}
static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
}
static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
}
static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
/**
 * dp_is_tput_high() - Check if throughput is high
 *
 * @soc: core txrx main context
 *
 * This check is based on the RTPM throughput-policy variable, which is used
 * to avoid RTPM when throughput is high.
 *
 * Return: non-zero if throughput is high, 0 otherwise
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
	return dp_get_rtpm_tput_policy_requirement(soc);
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;
		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;
		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}
/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
  289. dp_tx_err("SO desc is NULL!");
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
		dp_tx_err("TSO num desc is NULL!");
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->
			msdu_ext_desc->tso_num_desc;
		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->msdu_ext_desc->
					    tso_num_desc);
			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}
		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
				    tso_desc);
		tx_desc->msdu_ext_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif
void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;
	qdf_assert(pdev);
	soc = pdev->soc;
	dp_tx_outstanding_dec(pdev);
	if (tx_desc->msdu_ext_desc) {
		if (tx_desc->frm_type == dp_tx_frm_tso)
			dp_tx_tso_desc_release(soc, tx_desc);
		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
			dp_tx_me_free_buf(tx_desc->pdev,
					  tx_desc->msdu_ext_desc->me_buffer);
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
	}
	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);
	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    tx_desc->buffer_src)
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
		    tx_desc->id, comp_status,
		    qdf_atomic_read(&pdev->num_tx_outstanding));
	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 * 1) Mesh Mode
 * 2) DSRC Mode
 *
 * Return: HTT metadata size
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *) meta_data;
	uint8_t htt_desc_size;
	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;
	uint8_t *hdr = NULL;
	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0]) ||
	    msdu_info->exception_fw) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does, so no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			dp_tx_err("Error in filling HTT metadata");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}
	return htt_desc_size_aligned;
}
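/*
 * Worked example of the 8-byte alignment used above: if
 * sizeof(struct htt_tx_msdu_desc_ext2_t) were 36 (a hypothetical value),
 *
 *	htt_desc_size_aligned = (36 + 7) & ~0x7 = 40,
 *
 * so 40 bytes of headroom are pushed and the 36-byte descriptor is copied
 * in, leaving 4 bytes of padding.
 */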
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;
	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);
	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);
	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;
		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));
		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}
	return;
}
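/*
 * Sketch of the address split performed by qdf_dmaaddr_to_32s() above,
 * assuming a 64-bit dma_addr_t (the value is illustrative):
 *
 *	uint32_t lo, hi;
 *	qdf_dmaaddr_to_32s(0x0000001234567890ULL, &lo, &hi);
 *
 * leaves lo = 0x34567890 and hi = 0x00000012, matching the lo/hi buffer
 * address fields of the MSDU extension descriptor.
 */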
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;
	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}
/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;
	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}
/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;
	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}
	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}
#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;
	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
		     % CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
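/*
 * Example of the circular index above (CDP_MAX_TSO_PACKETS assumed to be
 * 16 here): successive calls return 1, 2, ..., 15, 0, 1, ... so the
 * per-pdev TSO stats slots are reused in ring fashion once the counter
 * wraps the modulus.
 */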
#else
static int dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
		tso_info->tso_num_seg_list;
	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);
	/* free all tso num seg descriptors; the list typically holds just one */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);
	/* free all tso segment descriptor */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;
	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;
	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}
	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);
	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);
	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}
	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);
	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);
	if (!(msdu_info->num_seg)) {
		/*
		 * Free the allocated TSO seg descs and num seg desc, and do
		 * unmap for the segments if DMA mapping has been done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
		return QDF_STATUS_E_INVAL;
	}
	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
			      msdu, 0, DP_TX_DESC_MAP);
	tso_info->curr_seg = tso_info->tso_seg_list;
	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
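/*
 * Plain-C sketch of the compile-time guard above (equivalent intent, not
 * the QDF implementation):
 *
 *	_Static_assert(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
 *		       sizeof(struct htt_tx_msdu_desc_ext2_t),
 *		       "msdu_info meta_data too small for ext2 descriptor");
 *
 * It ensures the meta_data[] scratch area in dp_tx_msdu_info_s can hold a
 * full HTT ext2 descriptor.
 */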
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;
	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}
	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}
	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}
		break;
	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;
	default:
		break;
	}
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);
	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);
	return msdu_ext_desc;
}
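/*
 * Design note: the extension descriptor is composed in the on-stack
 * cached_ext_desc[] first and only then copied out to the DMA-visible
 * memory at msdu_ext_desc->vaddr by hal_tx_ext_desc_sync(), so the
 * device-visible copy is written once, in a single pass.
 */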
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 * @soc: datapath SOC
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	if (dp_is_tput_high(soc))
		return;
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0));
	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);
	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif
#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Return: 1 if the packet is marked as exception,
 *         0 if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return 0;
}
#endif
#ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare a packet to
 *                                          send to FW as an indication that
 *                                          the data stream has ended
 * @vdev: DP vdev handle
 * @nbuf: original buffer from network stack
 *
 * Return: NULL on failure,
 *         nbuf on success
 */
static inline qdf_nbuf_t
dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
				     qdf_nbuf_t nbuf)
{
	/* Packet length should be enough to copy up to the L3 header */
	uint8_t end_nbuf_len = 64;
	uint8_t htt_desc_size_aligned;
	uint8_t htt_desc_size;
	qdf_nbuf_t end_nbuf;
	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
			 QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
		htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
		htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
		end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
		if (!end_nbuf) {
			end_nbuf = qdf_nbuf_alloc(NULL,
						  (htt_desc_size_aligned +
						   end_nbuf_len),
						  htt_desc_size_aligned,
						  8, false);
			if (!end_nbuf) {
				dp_err("Packet allocation failed");
				goto out;
			}
		} else {
			qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
		}
		qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
			     end_nbuf_len);
		qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
		return end_nbuf;
	}
out:
	return NULL;
}
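/*
 * Buffer reuse note: a previously completed indication buffer is recycled
 * from vdev->end_ind_pkt_q when available; a fresh nbuf, with headroom
 * reserved for the 8-byte-aligned HTT descriptor, is allocated only when
 * the queue is empty.
 */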
/**
 * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
 *                                           via exception path.
 * @vdev: DP vdev handle
 * @end_nbuf: skb to send as indication
 * @msdu_info: msdu_info of original nbuf
 * @peer_id: peer id
 *
 * Return: None
 */
static inline void
dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
				      qdf_nbuf_t end_nbuf,
				      struct dp_tx_msdu_info_s *msdu_info,
				      uint16_t peer_id)
{
	struct dp_tx_msdu_info_s e_msdu_info = {0};
	qdf_nbuf_t nbuf;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
	e_msdu_info.tx_queue = msdu_info->tx_queue;
	e_msdu_info.tid = msdu_info->tid;
	e_msdu_info.exception_fw = 1;
	desc_ext->host_tx_desc_pool = 1;
	desc_ext->traffic_end_indication = 1;
	nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
				      peer_id, NULL);
	if (nbuf) {
		dp_err("Traffic end indication packet tx failed");
		qdf_nbuf_free(nbuf);
	}
}
/**
 * dp_tx_traffic_end_indication_set_desc_flag() - Set the tx descriptor flag
 *                                                to mark it as a traffic end
 *                                                indication packet.
  867. * @tx_desc: Tx descriptor pointer
  868. * @msdu_info: msdu_info structure pointer
  869. *
  870. * Return: None
  871. */
  872. static inline void
  873. dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
  874. struct dp_tx_msdu_info_s *msdu_info)
  875. {
  876. struct htt_tx_msdu_desc_ext2_t *desc_ext =
  877. (struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
  878. if (qdf_unlikely(desc_ext->traffic_end_indication))
  879. tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
  880. }
  881. /**
882. * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
883. * freeing it when it is associated
884. * with a traffic-end-indication
885. * flagged descriptor.
  886. * @soc: dp soc handle
  887. * @desc: Tx descriptor pointer
  888. * @nbuf: buffer pointer
  889. *
  890. * Return: True if packet gets enqueued else false
  891. */
  892. static bool
  893. dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
  894. struct dp_tx_desc_s *desc,
  895. qdf_nbuf_t nbuf)
  896. {
  897. struct dp_vdev *vdev = NULL;
  898. if (qdf_unlikely((desc->flags &
  899. DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
  900. vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
  901. DP_MOD_ID_TX_COMP);
  902. if (vdev) {
  903. qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
  904. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
  905. return true;
  906. }
  907. }
  908. return false;
  909. }
  910. /**
  911. * dp_tx_traffic_end_indication_is_enabled() - get the feature
  912. * enable/disable status
  913. * @vdev: dp vdev handle
  914. *
915. * Return: True if the feature is enabled, else false
  916. */
  917. static inline bool
  918. dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
  919. {
  920. return qdf_unlikely(vdev->traffic_end_ind_en);
  921. }
  922. static inline qdf_nbuf_t
  923. dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  924. struct dp_tx_msdu_info_s *msdu_info,
  925. uint16_t peer_id, qdf_nbuf_t end_nbuf)
  926. {
  927. if (dp_tx_traffic_end_indication_is_enabled(vdev))
  928. end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
  929. nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
  930. if (qdf_unlikely(end_nbuf))
  931. dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
  932. msdu_info, peer_id);
  933. return nbuf;
  934. }
  935. #else
  936. static inline qdf_nbuf_t
  937. dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
  938. qdf_nbuf_t nbuf)
  939. {
  940. return NULL;
  941. }
  942. static inline void
  943. dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
  944. qdf_nbuf_t end_nbuf,
  945. struct dp_tx_msdu_info_s *msdu_info,
  946. uint16_t peer_id)
  947. {}
  948. static inline void
  949. dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
  950. struct dp_tx_msdu_info_s *msdu_info)
  951. {}
  952. static inline bool
  953. dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
  954. struct dp_tx_desc_s *desc,
  955. qdf_nbuf_t nbuf)
  956. {
  957. return false;
  958. }
  959. static inline bool
  960. dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
  961. {
  962. return false;
  963. }
  964. static inline qdf_nbuf_t
  965. dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  966. struct dp_tx_msdu_info_s *msdu_info,
  967. uint16_t peer_id, qdf_nbuf_t end_nbuf)
  968. {
  969. return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
  970. }
  971. #endif
  972. #if defined(QCA_SUPPORT_WDS_EXTENDED)
  973. static bool
  974. dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
  975. struct cdp_tx_exception_metadata *tx_exc_metadata)
  976. {
  977. if (soc->features.wds_ext_ast_override_enable &&
  978. tx_exc_metadata && tx_exc_metadata->is_wds_extended)
  979. return true;
  980. return false;
  981. }
  982. #else
  983. static bool
  984. dp_tx_is_wds_ast_override_en(struct dp_soc *soc,
  985. struct cdp_tx_exception_metadata *tx_exc_metadata)
  986. {
  987. return false;
  988. }
  989. #endif
  990. /**
  991. * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
  992. * @vdev: DP vdev handle
  993. * @nbuf: skb
  994. * @desc_pool_id: Descriptor pool ID
  995. * @msdu_info: Metadata to the fw
  996. * @tx_exc_metadata: Handle that holds exception path metadata
  997. *
  998. * Allocate and prepare Tx descriptor with msdu information.
  999. *
  1000. * Return: Pointer to Tx Descriptor on success,
  1001. * NULL on failure
  1002. */
  1003. static
  1004. struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
  1005. qdf_nbuf_t nbuf, uint8_t desc_pool_id,
  1006. struct dp_tx_msdu_info_s *msdu_info,
  1007. struct cdp_tx_exception_metadata *tx_exc_metadata)
  1008. {
  1009. uint8_t align_pad;
  1010. uint8_t is_exception = 0;
  1011. uint8_t htt_hdr_size;
  1012. struct dp_tx_desc_s *tx_desc;
  1013. struct dp_pdev *pdev = vdev->pdev;
  1014. struct dp_soc *soc = pdev->soc;
  1015. if (dp_tx_limit_check(vdev, nbuf))
  1016. return NULL;
  1017. /* Allocate software Tx descriptor */
  1018. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1019. if (qdf_unlikely(!tx_desc)) {
  1020. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1021. DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
  1022. return NULL;
  1023. }
  1024. dp_tx_outstanding_inc(pdev);
  1025. /* Initialize the SW tx descriptor */
  1026. tx_desc->nbuf = nbuf;
  1027. tx_desc->frm_type = dp_tx_frm_std;
  1028. tx_desc->tx_encap_type = ((tx_exc_metadata &&
  1029. (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
  1030. tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
  1031. tx_desc->vdev_id = vdev->vdev_id;
  1032. tx_desc->pdev = pdev;
  1033. tx_desc->msdu_ext_desc = NULL;
  1034. tx_desc->pkt_offset = 0;
  1035. tx_desc->length = qdf_nbuf_headlen(nbuf);
  1036. tx_desc->shinfo_addr = skb_end_pointer(nbuf);
  1037. dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
  1038. if (qdf_unlikely(vdev->multipass_en)) {
  1039. if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
  1040. goto failure;
  1041. }
  1042. /* Packets marked by upper layer (OS-IF) to be sent to FW */
  1043. if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
  1044. is_exception = 1;
1045. /* For BE chipsets, if WDS extension is enabled, do not mark the frame
1046. * to FW in the descriptor; instead mark it for AST-index-based search.
1047. */
  1048. if (dp_tx_is_wds_ast_override_en(soc, tx_exc_metadata))
  1049. return tx_desc;
  1050. /*
  1051. * For special modes (vdev_type == ocb or mesh), data frames should be
  1052. * transmitted using varying transmit parameters (tx spec) which include
1053. * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
  1054. * These are filled in HTT MSDU descriptor and sent in frame pre-header.
  1055. * These frames are sent as exception packets to firmware.
  1056. *
  1057. * HW requirement is that metadata should always point to a
  1058. * 8-byte aligned address. So we add alignment pad to start of buffer.
  1059. * HTT Metadata should be ensured to be multiple of 8-bytes,
  1060. * to get 8-byte aligned start address along with align_pad added
  1061. *
  1062. * |-----------------------------|
  1063. * | |
  1064. * |-----------------------------| <-----Buffer Pointer Address given
  1065. * | | ^ in HW descriptor (aligned)
  1066. * | HTT Metadata | |
  1067. * | | |
  1068. * | | | Packet Offset given in descriptor
  1069. * | | |
  1070. * |-----------------------------| |
  1071. * | Alignment Pad | v
  1072. * |-----------------------------| <----- Actual buffer start address
  1073. * | SKB Data | (Unaligned)
  1074. * | |
  1075. * | |
  1076. * | |
  1077. * | |
  1078. * | |
  1079. * |-----------------------------|
  1080. */
  1081. if (qdf_unlikely((msdu_info->exception_fw)) ||
  1082. (vdev->opmode == wlan_op_mode_ocb) ||
  1083. (tx_exc_metadata &&
  1084. tx_exc_metadata->is_tx_sniffer)) {
  1085. align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
  1086. if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
  1087. DP_STATS_INC(vdev,
  1088. tx_i.dropped.headroom_insufficient, 1);
  1089. goto failure;
  1090. }
  1091. if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
  1092. dp_tx_err("qdf_nbuf_push_head failed");
  1093. goto failure;
  1094. }
  1095. htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
  1096. msdu_info);
  1097. if (htt_hdr_size == 0)
  1098. goto failure;
  1099. tx_desc->length = qdf_nbuf_headlen(nbuf);
  1100. tx_desc->pkt_offset = align_pad + htt_hdr_size;
  1101. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1102. dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
  1103. msdu_info);
  1104. is_exception = 1;
  1105. tx_desc->length -= tx_desc->pkt_offset;
  1106. }
  1107. #if !TQM_BYPASS_WAR
  1108. if (is_exception || tx_exc_metadata)
  1109. #endif
  1110. {
  1111. /* Temporary WAR due to TQM VP issues */
  1112. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1113. qdf_atomic_inc(&soc->num_tx_exception);
  1114. }
  1115. return tx_desc;
  1116. failure:
  1117. dp_tx_desc_release(tx_desc, desc_pool_id);
  1118. return NULL;
  1119. }
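/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * align_pad/pkt_offset arithmetic used in the exception path above, with
 * hypothetical example values chosen for clarity.
 */
static inline void example_exception_offsets(void)
{
	uintptr_t data = 0x1005;	/* assumed unaligned skb data start */
	uint8_t align_pad = data & 0x7;	/* 5 bytes to reach 8B alignment */
	uint8_t htt_hdr_size = 16;	/* metadata, a multiple of 8 */

	/*
	 * Pushing align_pad bytes aligns the head to 0x1000; the HTT
	 * metadata push (a multiple of 8) keeps it aligned. pkt_offset
	 * (21 here) is how far the original payload sits past the
	 * aligned buffer start handed to HW.
	 */
	uint8_t pkt_offset = align_pad + htt_hdr_size;

	(void)pkt_offset;
}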
  1120. /**
  1121. * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
  1122. * frame
  1123. * @vdev: DP vdev handle
  1124. * @nbuf: skb
  1125. * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
  1126. * @desc_pool_id : Descriptor Pool ID
  1127. *
1128. * Allocate and prepare Tx descriptor with msdu and fragment descriptor
  1129. * information. For frames with fragments, allocate and prepare
  1130. * an MSDU extension descriptor
  1131. *
  1132. * Return: Pointer to Tx Descriptor on success,
  1133. * NULL on failure
  1134. */
  1135. static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
  1136. qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
  1137. uint8_t desc_pool_id)
  1138. {
  1139. struct dp_tx_desc_s *tx_desc;
  1140. struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
  1141. struct dp_pdev *pdev = vdev->pdev;
  1142. struct dp_soc *soc = pdev->soc;
  1143. if (dp_tx_limit_check(vdev, nbuf))
  1144. return NULL;
  1145. /* Allocate software Tx descriptor */
  1146. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1147. if (!tx_desc) {
  1148. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1149. return NULL;
  1150. }
  1151. dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
  1152. nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
  1153. dp_tx_outstanding_inc(pdev);
  1154. /* Initialize the SW tx descriptor */
  1155. tx_desc->nbuf = nbuf;
  1156. tx_desc->frm_type = msdu_info->frm_type;
  1157. tx_desc->tx_encap_type = vdev->tx_encap_type;
  1158. tx_desc->vdev_id = vdev->vdev_id;
  1159. tx_desc->pdev = pdev;
  1160. tx_desc->pkt_offset = 0;
  1161. dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
  1162. /* Handle scattered frames - TSO/SG/ME */
  1163. /* Allocate and prepare an extension descriptor for scattered frames */
  1164. msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
  1165. if (!msdu_ext_desc) {
  1166. dp_tx_info("Tx Extension Descriptor Alloc Fail");
  1167. goto failure;
  1168. }
  1169. #if TQM_BYPASS_WAR
  1170. /* Temporary WAR due to TQM VP issues */
  1171. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1172. qdf_atomic_inc(&soc->num_tx_exception);
  1173. #endif
  1174. if (qdf_unlikely(msdu_info->exception_fw))
  1175. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1176. tx_desc->msdu_ext_desc = msdu_ext_desc;
  1177. tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
  1178. msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
  1179. msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
  1180. tx_desc->dma_addr = msdu_ext_desc->paddr;
  1181. if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
  1182. tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
  1183. else
  1184. tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
  1185. return tx_desc;
  1186. failure:
  1187. dp_tx_desc_release(tx_desc, desc_pool_id);
  1188. return NULL;
  1189. }
  1190. /**
  1191. * dp_tx_prepare_raw() - Prepare RAW packet TX
  1192. * @vdev: DP vdev handle
  1193. * @nbuf: buffer pointer
  1194. * @seg_info: Pointer to Segment info Descriptor to be prepared
  1195. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
  1196. * descriptor
  1197. *
1198. * Return: nbuf on success, NULL on failure
  1199. */
  1200. static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1201. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  1202. {
  1203. qdf_nbuf_t curr_nbuf = NULL;
  1204. uint16_t total_len = 0;
  1205. qdf_dma_addr_t paddr;
  1206. int32_t i;
  1207. int32_t mapped_buf_num = 0;
  1208. struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
  1209. qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  1210. DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
  1211. /* Continue only if frames are of DATA type */
  1212. if (!DP_FRAME_IS_DATA(qos_wh)) {
  1213. DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1214. dp_tx_debug("Pkt recd is not of data type");
  1215. goto error;
  1216. }
  1217. /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
  1218. if (vdev->raw_mode_war &&
  1219. (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
  1220. (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
  1221. qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
  1222. for (curr_nbuf = nbuf, i = 0; curr_nbuf;
  1223. curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
  1224. /*
  1225. * Number of nbuf's must not exceed the size of the frags
  1226. * array in seg_info.
  1227. */
  1228. if (i >= DP_TX_MAX_NUM_FRAGS) {
  1229. dp_err_rl("nbuf cnt exceeds the max number of segs");
  1230. DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
  1231. goto error;
  1232. }
  1233. if (QDF_STATUS_SUCCESS !=
  1234. qdf_nbuf_map_nbytes_single(vdev->osdev,
  1235. curr_nbuf,
  1236. QDF_DMA_TO_DEVICE,
  1237. curr_nbuf->len)) {
1238. dp_tx_err("%s dma map error", __func__);
  1239. DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
  1240. goto error;
  1241. }
  1242. /* Update the count of mapped nbuf's */
  1243. mapped_buf_num++;
  1244. paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
  1245. seg_info->frags[i].paddr_lo = paddr;
  1246. seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
  1247. seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
  1248. seg_info->frags[i].vaddr = (void *) curr_nbuf;
  1249. total_len += qdf_nbuf_len(curr_nbuf);
  1250. }
  1251. seg_info->frag_cnt = i;
  1252. seg_info->total_len = total_len;
  1253. seg_info->next = NULL;
  1254. sg_info->curr_seg = seg_info;
  1255. msdu_info->frm_type = dp_tx_frm_raw;
  1256. msdu_info->num_seg = 1;
  1257. return nbuf;
  1258. error:
  1259. i = 0;
  1260. while (nbuf) {
  1261. curr_nbuf = nbuf;
  1262. if (i < mapped_buf_num) {
  1263. qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
  1264. QDF_DMA_TO_DEVICE,
  1265. curr_nbuf->len);
  1266. i++;
  1267. }
  1268. nbuf = qdf_nbuf_next(nbuf);
  1269. qdf_nbuf_free(curr_nbuf);
  1270. }
  1271. return NULL;
  1272. }
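/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * frag entries above split a DMA address into 32-bit halves; completion
 * paths (see dp_tx_sg_unmap_buf) recombine them. Minimal round trip:
 */
static inline uint64_t example_paddr_round_trip(uint64_t paddr)
{
	uint32_t paddr_lo = (uint32_t)paddr;
	uint32_t paddr_hi = (uint32_t)(paddr >> 32);

	/* equals the original paddr for any 64-bit address */
	return (uint64_t)paddr_lo | ((uint64_t)paddr_hi << 32);
}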
  1273. /**
  1274. * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
  1275. * @soc: DP soc handle
  1276. * @nbuf: Buffer pointer
  1277. *
  1278. * unmap the chain of nbufs that belong to this RAW frame.
  1279. *
  1280. * Return: None
  1281. */
  1282. static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
  1283. qdf_nbuf_t nbuf)
  1284. {
  1285. qdf_nbuf_t cur_nbuf = nbuf;
  1286. do {
  1287. qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
  1288. QDF_DMA_TO_DEVICE,
  1289. cur_nbuf->len);
  1290. cur_nbuf = qdf_nbuf_next(cur_nbuf);
  1291. } while (cur_nbuf);
  1292. }
  1293. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1294. void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
  1295. qdf_nbuf_t nbuf)
  1296. {
  1297. qdf_nbuf_t nbuf_local;
  1298. struct dp_vdev *vdev_local = vdev_hdl;
  1299. do {
  1300. if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
  1301. break;
  1302. nbuf_local = nbuf;
  1303. if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
  1304. htt_cmn_pkt_type_raw))
  1305. break;
  1306. else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
  1307. break;
  1308. else if (qdf_nbuf_is_tso((nbuf_local)))
  1309. break;
  1310. dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
  1311. (nbuf_local),
  1312. NULL, 1, 0);
  1313. } while (0);
  1314. }
  1315. #endif
  1316. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  1317. void dp_tx_update_stats(struct dp_soc *soc,
  1318. struct dp_tx_desc_s *tx_desc,
  1319. uint8_t ring_id)
  1320. {
  1321. uint32_t stats_len = dp_tx_get_pkt_len(tx_desc);
  1322. DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
  1323. }
  1324. int
  1325. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1326. struct dp_tx_desc_s *tx_desc,
  1327. uint8_t tid,
  1328. struct dp_tx_msdu_info_s *msdu_info,
  1329. uint8_t ring_id)
  1330. {
  1331. struct dp_swlm *swlm = &soc->swlm;
  1332. union swlm_data swlm_query_data;
  1333. struct dp_swlm_tcl_data tcl_data;
  1334. QDF_STATUS status;
  1335. int ret;
  1336. if (!swlm->is_enabled)
  1337. return msdu_info->skip_hp_update;
  1338. tcl_data.nbuf = tx_desc->nbuf;
  1339. tcl_data.tid = tid;
  1340. tcl_data.ring_id = ring_id;
  1341. tcl_data.pkt_len = dp_tx_get_pkt_len(tx_desc);
  1342. tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
  1343. swlm_query_data.tcl_data = &tcl_data;
  1344. status = dp_swlm_tcl_pre_check(soc, &tcl_data);
  1345. if (QDF_IS_STATUS_ERROR(status)) {
  1346. dp_swlm_tcl_reset_session_data(soc, ring_id);
  1347. DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
  1348. return 0;
  1349. }
  1350. ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
  1351. if (ret) {
  1352. DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
  1353. } else {
  1354. DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
  1355. }
  1356. return ret;
  1357. }
  1358. void
  1359. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1360. int coalesce)
  1361. {
  1362. if (coalesce)
  1363. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1364. else
  1365. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1366. }
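/*
 * Illustrative sketch (editor's addition, not part of the driver): how a
 * transmit path is expected to thread the SWLM coalescing verdict into
 * the ring access teardown above. example_tx_ring_exit is hypothetical.
 */
static inline void
example_tx_ring_exit(struct dp_soc *soc, struct dp_vdev *vdev,
		     struct dp_tx_desc_s *tx_desc,
		     struct dp_tx_msdu_info_s *msdu_info,
		     hal_ring_handle_t hal_ring_hdl,
		     uint8_t tid, uint8_t ring_id)
{
	/* non-zero: SWLM defers the TCL head pointer update (coalesce) */
	int coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
						msdu_info, ring_id);

	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}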
  1367. static inline void
  1368. dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
  1369. {
  1370. if (((i + 1) < msdu_info->num_seg))
  1371. msdu_info->skip_hp_update = 1;
  1372. else
  1373. msdu_info->skip_hp_update = 0;
  1374. }
  1375. static inline void
1376. dp_flush_tcl_hp(struct dp_soc *soc, uint8_t ring_id)
  1377. {
  1378. hal_ring_handle_t hal_ring_hdl =
  1379. dp_tx_get_hal_ring_hdl(soc, ring_id);
  1380. if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
  1381. dp_err("Fillmore: SRNG access start failed");
  1382. return;
  1383. }
  1384. dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
  1385. }
  1386. static inline void
  1387. dp_tx_check_and_flush_hp(struct dp_soc *soc,
  1388. QDF_STATUS status,
  1389. struct dp_tx_msdu_info_s *msdu_info)
  1390. {
  1391. if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
1392. dp_flush_tcl_hp(soc,
  1393. (msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
  1394. }
  1395. }
  1396. #else
  1397. static inline void
  1398. dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
  1399. {
  1400. }
  1401. static inline void
  1402. dp_tx_check_and_flush_hp(struct dp_soc *soc,
  1403. QDF_STATUS status,
  1404. struct dp_tx_msdu_info_s *msdu_info)
  1405. {
  1406. }
  1407. #endif
  1408. #ifdef FEATURE_RUNTIME_PM
  1409. void
  1410. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1411. hal_ring_handle_t hal_ring_hdl,
  1412. int coalesce)
  1413. {
  1414. int ret;
  1415. /*
  1416. * Avoid runtime get and put APIs under high throughput scenarios.
  1417. */
  1418. if (dp_get_rtpm_tput_policy_requirement(soc)) {
  1419. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1420. return;
  1421. }
  1422. ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
  1423. if (QDF_IS_STATUS_SUCCESS(ret)) {
  1424. if (hif_system_pm_state_check(soc->hif_handle)) {
  1425. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1426. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1427. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1428. } else {
  1429. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1430. }
  1431. hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
  1432. } else {
  1433. dp_runtime_get(soc);
  1434. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1435. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1436. qdf_atomic_inc(&soc->tx_pending_rtpm);
  1437. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1438. dp_runtime_put(soc);
  1439. }
  1440. }
  1441. #else
  1442. #ifdef DP_POWER_SAVE
  1443. void
  1444. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1445. hal_ring_handle_t hal_ring_hdl,
  1446. int coalesce)
  1447. {
  1448. if (hif_system_pm_state_check(soc->hif_handle)) {
  1449. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1450. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1451. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1452. } else {
  1453. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1454. }
  1455. }
  1456. #endif
  1457. #endif
  1458. /**
  1459. * dp_tx_get_tid() - Obtain TID to be used for this frame
  1460. * @vdev: DP vdev handle
  1461. * @nbuf: skb
  1462. * @msdu_info: msdu descriptor
  1463. *
1464. * Extract the DSCP or PCP information from the frame and map it into a TID value.
  1465. *
  1466. * Return: void
  1467. */
  1468. static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1469. struct dp_tx_msdu_info_s *msdu_info)
  1470. {
  1471. uint8_t tos = 0, dscp_tid_override = 0;
  1472. uint8_t *hdr_ptr, *L3datap;
  1473. uint8_t is_mcast = 0;
  1474. qdf_ether_header_t *eh = NULL;
  1475. qdf_ethervlan_header_t *evh = NULL;
  1476. uint16_t ether_type;
  1477. qdf_llc_t *llcHdr;
  1478. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  1479. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1480. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1481. eh = (qdf_ether_header_t *)nbuf->data;
  1482. hdr_ptr = (uint8_t *)(eh->ether_dhost);
  1483. L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
  1484. } else {
  1485. qdf_dot3_qosframe_t *qos_wh =
  1486. (qdf_dot3_qosframe_t *) nbuf->data;
  1487. msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
  1488. qos_wh->i_qos[0] & DP_QOS_TID : 0;
  1489. return;
  1490. }
  1491. is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
  1492. ether_type = eh->ether_type;
  1493. llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
  1494. /*
  1495. * Check if packet is dot3 or eth2 type.
  1496. */
  1497. if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
  1498. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1499. sizeof(*llcHdr));
  1500. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1501. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
  1502. sizeof(*llcHdr);
  1503. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
  1504. + sizeof(*llcHdr) +
  1505. sizeof(qdf_net_vlanhdr_t));
  1506. } else {
  1507. L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
  1508. sizeof(*llcHdr);
  1509. }
  1510. } else {
  1511. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1512. evh = (qdf_ethervlan_header_t *) eh;
  1513. ether_type = evh->ether_type;
  1514. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
  1515. }
  1516. }
  1517. /*
  1518. * Find priority from IP TOS DSCP field
  1519. */
  1520. if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
  1521. qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
  1522. if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
  1523. /* Only for unicast frames */
  1524. if (!is_mcast) {
  1525. /* send it on VO queue */
  1526. msdu_info->tid = DP_VO_TID;
  1527. }
  1528. } else {
  1529. /*
  1530. * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
  1531. * from TOS byte.
  1532. */
  1533. tos = ip->ip_tos;
  1534. dscp_tid_override = 1;
  1535. }
  1536. } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1537. /* TODO
1538. * use flowlabel
1539. * IGMP/MLD cases to be handled in phase 2
1540. */
  1541. unsigned long ver_pri_flowlabel;
  1542. unsigned long pri;
  1543. ver_pri_flowlabel = *(unsigned long *) L3datap;
  1544. pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
  1545. DP_IPV6_PRIORITY_SHIFT;
  1546. tos = pri;
  1547. dscp_tid_override = 1;
  1548. } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
  1549. msdu_info->tid = DP_VO_TID;
  1550. else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
  1551. /* Only for unicast frames */
  1552. if (!is_mcast) {
  1553. /* send ucast arp on VO queue */
  1554. msdu_info->tid = DP_VO_TID;
  1555. }
  1556. }
  1557. /*
  1558. * Assign all MCAST packets to BE
  1559. */
  1560. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1561. if (is_mcast) {
  1562. tos = 0;
  1563. dscp_tid_override = 1;
  1564. }
  1565. }
  1566. if (dscp_tid_override == 1) {
  1567. tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  1568. msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
  1569. }
  1570. if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
  1571. msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
  1572. return;
  1573. }
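/*
 * Illustrative sketch (editor's addition, not part of the driver):
 * extracting the 6-bit DSCP from a TOS byte as done above, assuming the
 * usual DP_IP_DSCP_SHIFT of 2 and DP_IP_DSCP_MASK of 0x3f.
 */
static inline uint8_t example_dscp_from_tos(uint8_t tos)
{
	/* drop ECN bits 0-1, keep DSCP bits 2-7:
	 * e.g. TOS 0xb8 (EF, ECN clear) -> DSCP 46
	 */
	return (tos >> 2) & 0x3f;
}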
  1574. /**
  1575. * dp_tx_classify_tid() - Obtain TID to be used for this frame
  1576. * @vdev: DP vdev handle
  1577. * @nbuf: skb
  1578. * @msdu_info: msdu descriptor
  1579. *
  1580. * Software based TID classification is required when more than 2 DSCP-TID
  1581. * mapping tables are needed.
  1582. * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
  1583. *
  1584. * Return: void
  1585. */
  1586. static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1587. struct dp_tx_msdu_info_s *msdu_info)
  1588. {
  1589. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1590. /*
1591. * skip_sw_tid_classification flag will be set in the below cases:
  1592. * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
  1593. * 2. hlos_tid_override enabled for vdev
  1594. * 3. mesh mode enabled for vdev
  1595. */
  1596. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1597. /* Update tid in msdu_info from skb priority */
  1598. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1599. & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
  1600. uint32_t tid = qdf_nbuf_get_priority(nbuf);
  1601. if (tid == DP_TX_INVALID_QOS_TAG)
  1602. return;
  1603. msdu_info->tid = tid;
  1604. return;
  1605. }
  1606. return;
  1607. }
  1608. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1609. }
  1610. #ifdef FEATURE_WLAN_TDLS
  1611. /**
  1612. * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
  1613. * @soc: datapath SOC
  1614. * @vdev: datapath vdev
  1615. * @tx_desc: TX descriptor
  1616. *
  1617. * Return: None
  1618. */
  1619. static void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1620. struct dp_vdev *vdev,
  1621. struct dp_tx_desc_s *tx_desc)
  1622. {
  1623. if (vdev) {
  1624. if (vdev->is_tdls_frame) {
  1625. tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
  1626. vdev->is_tdls_frame = false;
  1627. }
  1628. }
  1629. }
  1630. static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
  1631. {
  1632. uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
  1633. switch (soc->arch_id) {
  1634. case CDP_ARCH_TYPE_LI:
  1635. tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
  1636. break;
  1637. case CDP_ARCH_TYPE_BE:
  1638. tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
  1639. break;
  1640. case CDP_ARCH_TYPE_RH:
  1641. {
  1642. uint32_t *msg_word = (uint32_t *)htt_desc;
  1643. tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(
  1644. *(msg_word + 3));
  1645. }
  1646. break;
  1647. default:
  1648. dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
  1649. QDF_BUG(0);
  1650. }
  1651. return tx_status;
  1652. }
  1653. /**
  1654. * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
  1655. * @soc: dp_soc handle
  1656. * @tx_desc: TX descriptor
  1657. *
  1658. * Return: None
  1659. */
  1660. static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
  1661. struct dp_tx_desc_s *tx_desc)
  1662. {
  1663. uint8_t tx_status = 0;
  1664. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  1665. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1666. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1667. DP_MOD_ID_TDLS);
  1668. if (qdf_unlikely(!vdev)) {
  1669. dp_err_rl("vdev is null!");
  1670. goto error;
  1671. }
  1672. hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
  1673. tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
  1674. dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
  1675. if (vdev->tx_non_std_data_callback.func) {
  1676. qdf_nbuf_set_next(nbuf, NULL);
  1677. vdev->tx_non_std_data_callback.func(
  1678. vdev->tx_non_std_data_callback.ctxt,
  1679. nbuf, tx_status);
  1680. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1681. return;
  1682. } else {
  1683. dp_err_rl("callback func is null");
  1684. }
  1685. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1686. error:
  1687. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
  1688. qdf_nbuf_free(nbuf);
  1689. }
  1690. /**
  1691. * dp_tx_msdu_single_map() - do nbuf map
  1692. * @vdev: DP vdev handle
  1693. * @tx_desc: DP TX descriptor pointer
  1694. * @nbuf: skb pointer
  1695. *
  1696. * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
  1697. * operation done in other component.
  1698. *
  1699. * Return: QDF_STATUS
  1700. */
  1701. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1702. struct dp_tx_desc_s *tx_desc,
  1703. qdf_nbuf_t nbuf)
  1704. {
  1705. if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
  1706. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1707. nbuf,
  1708. QDF_DMA_TO_DEVICE,
  1709. nbuf->len);
  1710. else
  1711. return qdf_nbuf_map_single(vdev->osdev, nbuf,
  1712. QDF_DMA_TO_DEVICE);
  1713. }
  1714. #else
  1715. static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1716. struct dp_vdev *vdev,
  1717. struct dp_tx_desc_s *tx_desc)
  1718. {
  1719. }
  1720. static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
  1721. struct dp_tx_desc_s *tx_desc)
  1722. {
  1723. }
  1724. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1725. struct dp_tx_desc_s *tx_desc,
  1726. qdf_nbuf_t nbuf)
  1727. {
  1728. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1729. nbuf,
  1730. QDF_DMA_TO_DEVICE,
  1731. nbuf->len);
  1732. }
  1733. #endif
  1734. static inline
  1735. qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
  1736. struct dp_tx_desc_s *tx_desc,
  1737. qdf_nbuf_t nbuf)
  1738. {
  1739. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  1740. ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
  1741. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
  1742. return 0;
  1743. return qdf_nbuf_mapped_paddr_get(nbuf);
  1744. }
  1745. static inline
  1746. void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1747. {
  1748. qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
  1749. desc->nbuf,
  1750. desc->dma_addr,
  1751. QDF_DMA_TO_DEVICE,
  1752. desc->length);
  1753. }
  1754. #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
  1755. static inline bool
  1756. is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
  1757. {
  1758. struct net_device *ingress_dev;
  1759. skb_frag_t *frag;
  1760. uint16_t buf_len = 0;
  1761. uint16_t linear_data_len = 0;
  1762. uint8_t *payload_addr = NULL;
  1763. if (!nbuf->dev)
  1764. return false;
  1765. ingress_dev = dev_get_by_index(dev_net(nbuf->dev), nbuf->skb_iif);
  1766. if (!ingress_dev)
  1767. return false;
  1768. if ((ingress_dev->priv_flags & IFF_PHONY_HEADROOM)) {
  1769. dev_put(ingress_dev);
  1770. frag = &(skb_shinfo(nbuf)->frags[0]);
  1771. buf_len = skb_frag_size(frag);
  1772. payload_addr = (uint8_t *)skb_frag_address(frag);
  1773. linear_data_len = skb_headlen(nbuf);
  1774. buf_len += linear_data_len;
  1775. payload_addr = payload_addr - linear_data_len;
  1776. memcpy(payload_addr, nbuf->data, linear_data_len);
  1777. msdu_info->frm_type = dp_tx_frm_rmnet;
  1778. msdu_info->buf_len = buf_len;
  1779. msdu_info->payload_addr = payload_addr;
  1780. return true;
  1781. }
  1782. dev_put(ingress_dev);
  1783. return false;
  1784. }
  1785. static inline
  1786. qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
  1787. struct dp_tx_desc_s *tx_desc)
  1788. {
  1789. qdf_dma_addr_t paddr;
  1790. paddr = (qdf_dma_addr_t)qdf_mem_virt_to_phys(msdu_info->payload_addr);
  1791. tx_desc->length = msdu_info->buf_len;
  1792. qdf_nbuf_dma_clean_range((void *)msdu_info->payload_addr,
  1793. (void *)(msdu_info->payload_addr +
  1794. msdu_info->buf_len));
  1795. tx_desc->flags |= DP_TX_DESC_FLAG_RMNET;
  1796. return paddr;
  1797. }
  1798. #else
  1799. static inline bool
  1800. is_nbuf_frm_rmnet(qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
  1801. {
  1802. return false;
  1803. }
  1804. static inline
  1805. qdf_dma_addr_t dp_tx_rmnet_nbuf_map(struct dp_tx_msdu_info_s *msdu_info,
  1806. struct dp_tx_desc_s *tx_desc)
  1807. {
  1808. return 0;
  1809. }
  1810. #endif
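/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * rmnet fast path above assumes the first frag has at least
 * skb_headlen() bytes of headroom directly below its payload, so the
 * linear data can be copied in front of it to form one contiguous
 * buffer. A minimal sketch of that prepend, with hypothetical names:
 */
static inline uint8_t *example_prepend_linear(uint8_t *frag_payload,
					      uint8_t *linear_data,
					      uint16_t linear_len)
{
	/* assumed: linear_len bytes of headroom exist below frag_payload */
	uint8_t *start = frag_payload - linear_len;

	qdf_mem_copy(start, linear_data, linear_len);
	return start;	/* becomes msdu_info->payload_addr above */
}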
  1811. #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
  1812. static inline
  1813. qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
  1814. struct dp_tx_desc_s *tx_desc,
  1815. qdf_nbuf_t nbuf)
  1816. {
  1817. if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
  1818. qdf_nbuf_dma_clean_range((void *)nbuf->data,
  1819. (void *)(nbuf->data + nbuf->len));
  1820. return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
  1821. } else {
  1822. return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
  1823. }
  1824. }
  1825. static inline
  1826. void dp_tx_nbuf_unmap(struct dp_soc *soc,
  1827. struct dp_tx_desc_s *desc)
  1828. {
  1829. if (qdf_unlikely(!(desc->flags &
  1830. (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET))))
  1831. return dp_tx_nbuf_unmap_regular(soc, desc);
  1832. }
  1833. #else
  1834. static inline
  1835. qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
  1836. struct dp_tx_desc_s *tx_desc,
  1837. qdf_nbuf_t nbuf)
  1838. {
  1839. return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
  1840. }
  1841. static inline
  1842. void dp_tx_nbuf_unmap(struct dp_soc *soc,
  1843. struct dp_tx_desc_s *desc)
  1844. {
  1845. return dp_tx_nbuf_unmap_regular(soc, desc);
  1846. }
  1847. #endif
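/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * no-map fast path above substitutes a cache clean plus virt-to-phys for
 * a DMA map, so completion must skip the unmap as well. A hypothetical
 * guard mirroring the flag checks in dp_tx_nbuf_unmap:
 */
static inline bool example_needs_dma_unmap(uint32_t desc_flags)
{
	/* SIMPLE and RMNET buffers were never DMA-mapped */
	return !(desc_flags &
		 (DP_TX_DESC_FLAG_SIMPLE | DP_TX_DESC_FLAG_RMNET));
}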
  1848. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
  1849. static inline
  1850. void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1851. {
  1852. dp_tx_nbuf_unmap(soc, desc);
  1853. desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
  1854. }
  1855. static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1856. {
  1857. if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
  1858. dp_tx_nbuf_unmap(soc, desc);
  1859. }
  1860. #else
  1861. static inline
  1862. void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1863. {
  1864. }
  1865. static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1866. {
  1867. dp_tx_nbuf_unmap(soc, desc);
  1868. }
  1869. #endif
  1870. #ifdef MESH_MODE_SUPPORT
  1871. /**
  1872. * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
  1873. * @soc: datapath SOC
  1874. * @vdev: datapath vdev
  1875. * @tx_desc: TX descriptor
  1876. *
  1877. * Return: None
  1878. */
  1879. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1880. struct dp_vdev *vdev,
  1881. struct dp_tx_desc_s *tx_desc)
  1882. {
  1883. if (qdf_unlikely(vdev->mesh_vdev))
  1884. tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
  1885. }
  1886. /**
  1887. * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
  1888. * @soc: dp_soc handle
  1889. * @tx_desc: TX descriptor
  1890. * @delayed_free: delay the nbuf free
  1891. *
  1892. * Return: nbuf to be freed late
  1893. */
  1894. static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1895. struct dp_tx_desc_s *tx_desc,
  1896. bool delayed_free)
  1897. {
  1898. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1899. struct dp_vdev *vdev = NULL;
  1900. vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
  1901. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
  1902. if (vdev)
  1903. DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
  1904. if (delayed_free)
  1905. return nbuf;
  1906. qdf_nbuf_free(nbuf);
  1907. } else {
  1908. if (vdev && vdev->osif_tx_free_ext) {
  1909. vdev->osif_tx_free_ext((nbuf));
  1910. } else {
  1911. if (delayed_free)
  1912. return nbuf;
  1913. qdf_nbuf_free(nbuf);
  1914. }
  1915. }
  1916. if (vdev)
  1917. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  1918. return NULL;
  1919. }
  1920. #else
  1921. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1922. struct dp_vdev *vdev,
  1923. struct dp_tx_desc_s *tx_desc)
  1924. {
  1925. }
  1926. static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1927. struct dp_tx_desc_s *tx_desc,
  1928. bool delayed_free)
  1929. {
  1930. return NULL;
  1931. }
  1932. #endif
  1933. int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
  1934. {
  1935. struct dp_pdev *pdev = NULL;
  1936. struct dp_ast_entry *src_ast_entry = NULL;
  1937. struct dp_ast_entry *dst_ast_entry = NULL;
  1938. struct dp_soc *soc = NULL;
  1939. qdf_assert(vdev);
  1940. pdev = vdev->pdev;
  1941. qdf_assert(pdev);
  1942. soc = pdev->soc;
  1943. dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1944. (soc, dstmac, vdev->pdev->pdev_id);
  1945. src_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1946. (soc, srcmac, vdev->pdev->pdev_id);
  1947. if (dst_ast_entry && src_ast_entry) {
  1948. if (dst_ast_entry->peer_id ==
  1949. src_ast_entry->peer_id)
  1950. return 1;
  1951. }
  1952. return 0;
  1953. }
  1954. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
  1955. defined(WLAN_MCAST_MLO)
1956. /* MLO peer id for reinject */
  1957. #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
  1958. /* MLO vdev id inc offset */
  1959. #define DP_MLO_VDEV_ID_OFFSET 0x80
  1960. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1961. static inline bool
  1962. dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
  1963. {
  1964. if (tx_exc_metadata && tx_exc_metadata->is_wds_extended)
  1965. return true;
  1966. return false;
  1967. }
  1968. #else
  1969. static inline bool
  1970. dp_tx_wds_ext_check(struct cdp_tx_exception_metadata *tx_exc_metadata)
  1971. {
  1972. return false;
  1973. }
  1974. #endif
  1975. static inline void
  1976. dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
  1977. struct cdp_tx_exception_metadata *tx_exc_metadata)
  1978. {
  1979. /* wds ext enabled will not set the TO_FW bit */
  1980. if (dp_tx_wds_ext_check(tx_exc_metadata))
  1981. return;
  1982. if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
  1983. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1984. qdf_atomic_inc(&soc->num_tx_exception);
  1985. }
  1986. }
  1987. static inline void
  1988. dp_tx_update_mcast_param(uint16_t peer_id,
  1989. uint16_t *htt_tcl_metadata,
  1990. struct dp_vdev *vdev,
  1991. struct dp_tx_msdu_info_s *msdu_info)
  1992. {
  1993. if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
  1994. *htt_tcl_metadata = 0;
  1995. DP_TX_TCL_METADATA_TYPE_SET(
  1996. *htt_tcl_metadata,
  1997. HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
  1998. HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
  1999. msdu_info->gsn);
  2000. msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
  2001. if (qdf_unlikely(vdev->nawds_enabled ||
  2002. dp_vdev_is_wds_ext_enabled(vdev)))
  2003. HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
  2004. *htt_tcl_metadata, 1);
  2005. } else {
  2006. msdu_info->vdev_id = vdev->vdev_id;
  2007. }
  2008. }
  2009. #else
  2010. static inline void
  2011. dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
  2012. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2013. {
  2014. }
  2015. static inline void
  2016. dp_tx_update_mcast_param(uint16_t peer_id,
  2017. uint16_t *htt_tcl_metadata,
  2018. struct dp_vdev *vdev,
  2019. struct dp_tx_msdu_info_s *msdu_info)
  2020. {
  2021. }
  2022. #endif
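/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * *_SET macros above are bitfield writers over the TCL metadata word. A
 * generic equivalent, with shift/mask passed in rather than taken from
 * the HTT definitions:
 */
static inline void example_metadata_field_set(uint16_t *word, uint16_t val,
					      unsigned int shift,
					      uint16_t mask)
{
	*word = (*word & ~(mask << shift)) | ((val & mask) << shift);
}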
  2023. #ifdef DP_TX_SW_DROP_STATS_INC
  2024. static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
  2025. qdf_nbuf_t nbuf,
  2026. enum cdp_tx_sw_drop drop_code)
  2027. {
  2028. /* EAPOL Drop stats */
  2029. if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) {
  2030. switch (drop_code) {
  2031. case TX_DESC_ERR:
  2032. DP_STATS_INC(pdev, eap_drop_stats.tx_desc_err, 1);
  2033. break;
  2034. case TX_HAL_RING_ACCESS_ERR:
  2035. DP_STATS_INC(pdev,
  2036. eap_drop_stats.tx_hal_ring_access_err, 1);
  2037. break;
  2038. case TX_DMA_MAP_ERR:
  2039. DP_STATS_INC(pdev, eap_drop_stats.tx_dma_map_err, 1);
  2040. break;
  2041. case TX_HW_ENQUEUE:
  2042. DP_STATS_INC(pdev, eap_drop_stats.tx_hw_enqueue, 1);
  2043. break;
  2044. case TX_SW_ENQUEUE:
  2045. DP_STATS_INC(pdev, eap_drop_stats.tx_sw_enqueue, 1);
  2046. break;
  2047. default:
  2048. dp_info_rl("Invalid eapol_drop code: %d", drop_code);
  2049. break;
  2050. }
  2051. }
  2052. }
  2053. #else
  2054. static void tx_sw_drop_stats_inc(struct dp_pdev *pdev,
  2055. qdf_nbuf_t nbuf,
  2056. enum cdp_tx_sw_drop drop_code)
  2057. {
  2058. }
  2059. #endif
  2060. qdf_nbuf_t
  2061. dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2062. struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
  2063. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2064. {
  2065. struct dp_pdev *pdev = vdev->pdev;
  2066. struct dp_soc *soc = pdev->soc;
  2067. struct dp_tx_desc_s *tx_desc;
  2068. QDF_STATUS status;
  2069. struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
  2070. uint16_t htt_tcl_metadata = 0;
  2071. enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
  2072. uint8_t tid = msdu_info->tid;
  2073. struct cdp_tid_tx_stats *tid_stats = NULL;
  2074. qdf_dma_addr_t paddr;
  2075. /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
  2076. tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
  2077. msdu_info, tx_exc_metadata);
  2078. if (!tx_desc) {
  2079. dp_err_rl("Tx_desc prepare Fail vdev_id %d vdev %pK queue %d",
  2080. vdev->vdev_id, vdev, tx_q->desc_pool_id);
  2081. drop_code = TX_DESC_ERR;
  2082. goto fail_return;
  2083. }
  2084. dp_tx_update_tdls_flags(soc, vdev, tx_desc);
  2085. if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
  2086. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2087. DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
  2088. } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
  2089. DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
  2090. DP_TCL_METADATA_TYPE_PEER_BASED);
  2091. DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
  2092. peer_id);
  2093. dp_tx_bypass_reinjection(soc, tx_desc, tx_exc_metadata);
  2094. } else
  2095. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2096. if (msdu_info->exception_fw)
  2097. DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  2098. dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
  2099. !pdev->enhanced_stats_en);
  2100. dp_tx_update_mesh_flags(soc, vdev, tx_desc);
  2101. if (qdf_unlikely(msdu_info->frm_type == dp_tx_frm_rmnet))
  2102. paddr = dp_tx_rmnet_nbuf_map(msdu_info, tx_desc);
  2103. else
  2104. paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf);
  2105. if (!paddr) {
  2106. /* Handle failure */
  2107. dp_err("qdf_nbuf_map failed");
  2108. DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
  2109. drop_code = TX_DMA_MAP_ERR;
  2110. goto release_desc;
  2111. }
  2112. tx_desc->dma_addr = paddr;
  2113. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  2114. tx_desc->id, DP_TX_DESC_MAP);
  2115. dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
  2116. /* Enqueue the Tx MSDU descriptor to HW for transmit */
  2117. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  2118. htt_tcl_metadata,
  2119. tx_exc_metadata, msdu_info);
  2120. if (status != QDF_STATUS_SUCCESS) {
  2121. dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2122. tx_desc, tx_q->ring_id);
  2123. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  2124. tx_desc->id, DP_TX_DESC_UNMAP);
  2125. dp_tx_nbuf_unmap(soc, tx_desc);
  2126. drop_code = TX_HW_ENQUEUE;
  2127. goto release_desc;
  2128. }
  2129. tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
  2130. return NULL;
  2131. release_desc:
  2132. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2133. fail_return:
  2134. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2135. tx_sw_drop_stats_inc(pdev, nbuf, drop_code);
  2136. tid_stats = &pdev->stats.tid_stats.
  2137. tid_tx_stats[tx_q->ring_id][tid];
  2138. tid_stats->swdrop_cnt[drop_code]++;
  2139. return nbuf;
  2140. }
  2141. /**
  2142. * dp_tdls_tx_comp_free_buff() - Free non std buffer when TDLS flag is set
  2143. * @soc: Soc handle
  2144. * @desc: software Tx descriptor to be processed
  2145. *
  2146. * Return: 0 if Success
  2147. */
  2148. #ifdef FEATURE_WLAN_TDLS
  2149. static inline int
  2150. dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  2151. {
  2152. /* If it is TDLS mgmt, don't unmap or free the frame */
  2153. if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
  2154. dp_non_std_htt_tx_comp_free_buff(soc, desc);
  2155. return 0;
  2156. }
  2157. return 1;
  2158. }
  2159. #else
  2160. static inline int
  2161. dp_tdls_tx_comp_free_buff(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  2162. {
  2163. return 1;
  2164. }
  2165. #endif
  2166. qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
  2167. bool delayed_free)
  2168. {
  2169. qdf_nbuf_t nbuf = desc->nbuf;
  2170. enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
  2171. /* nbuf already freed in vdev detach path */
  2172. if (!nbuf)
  2173. return NULL;
  2174. if (!dp_tdls_tx_comp_free_buff(soc, desc))
  2175. return NULL;
  2176. /* 0 : MSDU buffer, 1 : MLE */
  2177. if (desc->msdu_ext_desc) {
  2178. /* TSO free */
  2179. if (hal_tx_ext_desc_get_tso_enable(
  2180. desc->msdu_ext_desc->vaddr)) {
  2181. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  2182. desc->id, DP_TX_COMP_MSDU_EXT);
  2183. dp_tx_tso_seg_history_add(soc,
  2184. desc->msdu_ext_desc->tso_desc,
  2185. desc->nbuf, desc->id, type);
2186. /* unmap each TSO seg before freeing the nbuf */
  2187. dp_tx_tso_unmap_segment(soc,
  2188. desc->msdu_ext_desc->tso_desc,
  2189. desc->msdu_ext_desc->
  2190. tso_num_desc);
  2191. goto nbuf_free;
  2192. }
  2193. if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
  2194. void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
  2195. qdf_dma_addr_t iova;
  2196. uint32_t frag_len;
  2197. uint32_t i;
  2198. qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
  2199. QDF_DMA_TO_DEVICE,
  2200. qdf_nbuf_headlen(nbuf));
  2201. for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
  2202. hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
  2203. &iova,
  2204. &frag_len);
  2205. if (!iova || !frag_len)
  2206. break;
  2207. qdf_mem_unmap_page(soc->osdev, iova, frag_len,
  2208. QDF_DMA_TO_DEVICE);
  2209. }
  2210. goto nbuf_free;
  2211. }
  2212. }
2213. /* If it's an ME frame, don't unmap the cloned nbufs */
  2214. if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
  2215. goto nbuf_free;
  2216. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
  2217. dp_tx_unmap(soc, desc);
  2218. if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
  2219. return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
  2220. if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
  2221. return NULL;
  2222. nbuf_free:
  2223. if (delayed_free)
  2224. return nbuf;
  2225. qdf_nbuf_free(nbuf);
  2226. return NULL;
  2227. }
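/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * intended use of the delayed_free contract above. With delayed_free
 * true, ownership of the nbuf returns to the caller, who frees it once
 * it is safe to do so (e.g. outside a lock):
 */
static inline void example_comp_free_delayed(struct dp_soc *soc,
					     struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t late_nbuf = dp_tx_comp_free_buf(soc, desc, true);

	/* ... finish critical-section work first ... */
	if (late_nbuf)
		qdf_nbuf_free(late_nbuf);
}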
  2228. /**
  2229. * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
  2230. * @soc: DP soc handle
  2231. * @nbuf: skb
  2232. * @msdu_info: MSDU info
  2233. *
  2234. * Return: None
  2235. */
  2236. static inline void
  2237. dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2238. struct dp_tx_msdu_info_s *msdu_info)
  2239. {
  2240. uint32_t cur_idx;
  2241. struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
  2242. qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
  2243. qdf_nbuf_headlen(nbuf));
  2244. for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
  2245. qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
  2246. (seg->frags[cur_idx].paddr_lo | ((uint64_t)
  2247. seg->frags[cur_idx].paddr_hi) << 32),
  2248. seg->frags[cur_idx].len,
  2249. QDF_DMA_TO_DEVICE);
  2250. }
  2251. #if QDF_LOCK_STATS
  2252. noinline
  2253. #else
  2254. #endif
  2255. qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2256. struct dp_tx_msdu_info_s *msdu_info)
  2257. {
  2258. uint32_t i;
  2259. struct dp_pdev *pdev = vdev->pdev;
  2260. struct dp_soc *soc = pdev->soc;
  2261. struct dp_tx_desc_s *tx_desc;
  2262. bool is_cce_classified = false;
  2263. QDF_STATUS status;
  2264. uint16_t htt_tcl_metadata = 0;
  2265. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  2266. struct cdp_tid_tx_stats *tid_stats = NULL;
  2267. uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
  2268. if (msdu_info->frm_type == dp_tx_frm_me)
  2269. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2270. i = 0;
  2271. /* Print statement to track i and num_seg */
  2272. /*
2273. * For each segment (maps to 1 MSDU), prepare software and hardware
  2274. * descriptors using information in msdu_info
  2275. */
  2276. while (i < msdu_info->num_seg) {
  2277. /*
  2278. * Setup Tx descriptor for an MSDU, and MSDU extension
  2279. * descriptor
  2280. */
  2281. tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
  2282. tx_q->desc_pool_id);
  2283. if (!tx_desc) {
  2284. if (msdu_info->frm_type == dp_tx_frm_me) {
  2285. prep_desc_fail++;
  2286. dp_tx_me_free_buf(pdev,
  2287. (void *)(msdu_info->u.sg_info
  2288. .curr_seg->frags[0].vaddr));
  2289. if (prep_desc_fail == msdu_info->num_seg) {
  2290. /*
  2291. * Unmap is needed only if descriptor
  2292. * preparation failed for all segments.
  2293. */
  2294. qdf_nbuf_unmap(soc->osdev,
  2295. msdu_info->u.sg_info.
  2296. curr_seg->nbuf,
  2297. QDF_DMA_TO_DEVICE);
  2298. }
  2299. /*
  2300. * Free the nbuf for the current segment
  2301. * and make it point to the next in the list.
2302. * For ME, there are as many segments as there
2303. * are clients.
  2304. */
  2305. qdf_nbuf_free(msdu_info->u.sg_info
  2306. .curr_seg->nbuf);
  2307. if (msdu_info->u.sg_info.curr_seg->next) {
  2308. msdu_info->u.sg_info.curr_seg =
  2309. msdu_info->u.sg_info
  2310. .curr_seg->next;
  2311. nbuf = msdu_info->u.sg_info
  2312. .curr_seg->nbuf;
  2313. }
  2314. i++;
  2315. continue;
  2316. }
  2317. if (msdu_info->frm_type == dp_tx_frm_tso) {
  2318. dp_tx_tso_seg_history_add(
  2319. soc,
  2320. msdu_info->u.tso_info.curr_seg,
  2321. nbuf, 0, DP_TX_DESC_UNMAP);
  2322. dp_tx_tso_unmap_segment(soc,
  2323. msdu_info->u.tso_info.
  2324. curr_seg,
  2325. msdu_info->u.tso_info.
  2326. tso_num_seg_list);
  2327. if (msdu_info->u.tso_info.curr_seg->next) {
  2328. msdu_info->u.tso_info.curr_seg =
  2329. msdu_info->u.tso_info.curr_seg->next;
  2330. i++;
  2331. continue;
  2332. }
  2333. }
  2334. if (msdu_info->frm_type == dp_tx_frm_sg)
  2335. dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
  2336. goto done;
  2337. }
  2338. if (msdu_info->frm_type == dp_tx_frm_me) {
  2339. tx_desc->msdu_ext_desc->me_buffer =
  2340. (struct dp_tx_me_buf_t *)msdu_info->
  2341. u.sg_info.curr_seg->frags[0].vaddr;
  2342. tx_desc->flags |= DP_TX_DESC_FLAG_ME;
  2343. }
  2344. if (is_cce_classified)
  2345. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  2346. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2347. if (msdu_info->exception_fw) {
  2348. DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  2349. }
  2350. dp_tx_is_hp_update_required(i, msdu_info);
  2351. /*
  2352. * For frames with multiple segments (TSO, ME), jump to next
  2353. * segment.
  2354. */
  2355. if (msdu_info->frm_type == dp_tx_frm_tso) {
  2356. if (msdu_info->u.tso_info.curr_seg->next) {
  2357. msdu_info->u.tso_info.curr_seg =
  2358. msdu_info->u.tso_info.curr_seg->next;
  2359. /*
  2360. * If this is a jumbo nbuf, then increment the
  2361. * number of nbuf users for each additional
  2362. * segment of the msdu. This will ensure that
  2363. * the skb is freed only after receiving tx
  2364. * completion for all segments of an nbuf
  2365. */
  2366. qdf_nbuf_inc_users(nbuf);
  2367. /* Check with MCL if this is needed */
  2368. /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
  2369. */
  2370. }
  2371. }
  2372. dp_tx_update_mcast_param(DP_INVALID_PEER,
  2373. &htt_tcl_metadata,
  2374. vdev,
  2375. msdu_info);
  2376. /*
  2377. * Enqueue the Tx MSDU descriptor to HW for transmit
  2378. */
  2379. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  2380. htt_tcl_metadata,
  2381. NULL, msdu_info);
  2382. dp_tx_check_and_flush_hp(soc, status, msdu_info);
  2383. if (status != QDF_STATUS_SUCCESS) {
  2384. dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2385. tx_desc, tx_q->ring_id);
  2386. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2387. tid_stats = &pdev->stats.tid_stats.
  2388. tid_tx_stats[tx_q->ring_id][msdu_info->tid];
  2389. tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
  2390. if (msdu_info->frm_type == dp_tx_frm_me) {
  2391. hw_enq_fail++;
  2392. if (hw_enq_fail == msdu_info->num_seg) {
  2393. /*
  2394. * Unmap is needed only if enqueue
  2395. * failed for all segments.
  2396. */
  2397. qdf_nbuf_unmap(soc->osdev,
  2398. msdu_info->u.sg_info.
  2399. curr_seg->nbuf,
  2400. QDF_DMA_TO_DEVICE);
  2401. }
  2402. /*
  2403. * Free the nbuf for the current segment
  2404. * and make it point to the next in the list.
2405. * For ME, there are as many segments as there
2406. * are clients.
  2407. */
  2408. qdf_nbuf_free(msdu_info->u.sg_info
  2409. .curr_seg->nbuf);
  2410. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2411. if (msdu_info->u.sg_info.curr_seg->next) {
  2412. msdu_info->u.sg_info.curr_seg =
  2413. msdu_info->u.sg_info
  2414. .curr_seg->next;
  2415. nbuf = msdu_info->u.sg_info
  2416. .curr_seg->nbuf;
  2417. } else
  2418. break;
  2419. i++;
  2420. continue;
  2421. }
  2422. /*
  2423. * For TSO frames, the nbuf users increment done for
  2424. * the current segment has to be reverted, since the
  2425. * hw enqueue for this segment failed
  2426. */
  2427. if (msdu_info->frm_type == dp_tx_frm_tso &&
  2428. msdu_info->u.tso_info.curr_seg) {
  2429. /*
  2430. * unmap and free current,
  2431. * retransmit remaining segments
  2432. */
  2433. dp_tx_comp_free_buf(soc, tx_desc, false);
  2434. i++;
  2435. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2436. continue;
  2437. }
  2438. if (msdu_info->frm_type == dp_tx_frm_sg)
  2439. dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
  2440. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2441. goto done;
  2442. }
  2443. /*
  2444. * TODO
  2445. * if tso_info structure can be modified to have curr_seg
  2446. * as first element, following 2 blocks of code (for TSO and SG)
  2447. * can be combined into 1
  2448. */
  2449. /*
  2450. * For Multicast-Unicast converted packets,
  2451. * each converted frame (for a client) is represented as
  2452. * 1 segment
  2453. */
  2454. if ((msdu_info->frm_type == dp_tx_frm_sg) ||
  2455. (msdu_info->frm_type == dp_tx_frm_me)) {
  2456. if (msdu_info->u.sg_info.curr_seg->next) {
  2457. msdu_info->u.sg_info.curr_seg =
  2458. msdu_info->u.sg_info.curr_seg->next;
  2459. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2460. } else
  2461. break;
  2462. }
  2463. i++;
  2464. }
  2465. nbuf = NULL;
  2466. done:
  2467. return nbuf;
  2468. }
  2469. /**
  2470. * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
  2471. * for SG frames
  2472. * @vdev: DP vdev handle
  2473. * @nbuf: skb
  2474. * @seg_info: Pointer to Segment info Descriptor to be prepared
  2475. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2476. *
* Return: nbuf on success,
* NULL when SG preparation fails (the nbuf is freed internally)
  2479. */
  2480. static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2481. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  2482. {
  2483. uint32_t cur_frag, nr_frags, i;
  2484. qdf_dma_addr_t paddr;
  2485. struct dp_tx_sg_info_s *sg_info;
  2486. sg_info = &msdu_info->u.sg_info;
  2487. nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2488. if (QDF_STATUS_SUCCESS !=
  2489. qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
  2490. QDF_DMA_TO_DEVICE,
  2491. qdf_nbuf_headlen(nbuf))) {
  2492. dp_tx_err("dma map error");
  2493. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2494. qdf_nbuf_free(nbuf);
  2495. return NULL;
  2496. }
  2497. paddr = qdf_nbuf_mapped_paddr_get(nbuf);
  2498. seg_info->frags[0].paddr_lo = paddr;
  2499. seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
  2500. seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
  2501. seg_info->frags[0].vaddr = (void *) nbuf;
  2502. for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
  2503. if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
  2504. nbuf, 0,
  2505. QDF_DMA_TO_DEVICE,
  2506. cur_frag)) {
  2507. dp_tx_err("frag dma map error");
  2508. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2509. goto map_err;
  2510. }
  2511. paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
  2512. seg_info->frags[cur_frag + 1].paddr_lo = paddr;
  2513. seg_info->frags[cur_frag + 1].paddr_hi =
  2514. ((uint64_t) paddr) >> 32;
  2515. seg_info->frags[cur_frag + 1].len =
  2516. qdf_nbuf_get_frag_size(nbuf, cur_frag);
  2517. }
  2518. seg_info->frag_cnt = (cur_frag + 1);
  2519. seg_info->total_len = qdf_nbuf_len(nbuf);
  2520. seg_info->next = NULL;
  2521. sg_info->curr_seg = seg_info;
  2522. msdu_info->frm_type = dp_tx_frm_sg;
  2523. msdu_info->num_seg = 1;
  2524. return nbuf;
  2525. map_err:
  2526. /* restore paddr into nbuf before calling unmap */
  2527. qdf_nbuf_mapped_paddr_set(nbuf,
  2528. (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
  2529. ((uint64_t)
  2530. seg_info->frags[0].paddr_hi) << 32));
  2531. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  2532. QDF_DMA_TO_DEVICE,
  2533. seg_info->frags[0].len);
  2534. for (i = 1; i <= cur_frag; i++) {
  2535. qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
  2536. (seg_info->frags[i].paddr_lo | ((uint64_t)
  2537. seg_info->frags[i].paddr_hi) << 32),
  2538. seg_info->frags[i].len,
  2539. QDF_DMA_TO_DEVICE);
  2540. }
  2541. qdf_nbuf_free(nbuf);
  2542. return NULL;
  2543. }
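/*
 * Illustrative sketch, not part of the driver: how the 32-bit
 * paddr_lo/paddr_hi pair kept in seg_info->frags[] above round-trips
 * to and from a 64-bit DMA address, mirroring both the map path and
 * the map_err unmap path. The DP_TX_DOC_SKETCHES guard and the
 * dp_doc_* names are hypothetical and exist only for documentation.
 */
#ifdef DP_TX_DOC_SKETCHES
struct dp_doc_frag_addr {
	uint32_t paddr_lo;
	uint32_t paddr_hi;
};

static inline void dp_doc_frag_addr_split(uint64_t paddr,
					  struct dp_doc_frag_addr *f)
{
	f->paddr_lo = (uint32_t)paddr;		/* low 32 bits */
	f->paddr_hi = (uint32_t)(paddr >> 32);	/* high 32 bits */
}

static inline uint64_t dp_doc_frag_addr_join(struct dp_doc_frag_addr *f)
{
	/* same recombination used before qdf_mem_unmap_page() above */
	return (uint64_t)f->paddr_lo | ((uint64_t)f->paddr_hi << 32);
}
#endif /* DP_TX_DOC_SKETCHES */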
  2544. /**
  2545. * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
  2546. * @vdev: DP vdev handle
  2547. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2548. * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
  2549. *
* Return: None
  2552. */
  2553. static
  2554. void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
  2555. struct dp_tx_msdu_info_s *msdu_info,
  2556. uint16_t ppdu_cookie)
  2557. {
  2558. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2559. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2560. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2561. HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
  2562. (msdu_info->meta_data[5], 1);
  2563. HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
  2564. (msdu_info->meta_data[5], 1);
  2565. HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
  2566. (msdu_info->meta_data[6], ppdu_cookie);
  2567. msdu_info->exception_fw = 1;
  2568. msdu_info->is_tx_sniffer = 1;
  2569. }
  2570. #ifdef MESH_MODE_SUPPORT
  2571. /**
  2572. * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
  2573. * and prepare msdu_info for mesh frames.
  2574. * @vdev: DP vdev handle
  2575. * @nbuf: skb
  2576. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2577. *
  2578. * Return: NULL on failure,
  2579. * nbuf when extracted successfully
  2580. */
  2581. static
  2582. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2583. struct dp_tx_msdu_info_s *msdu_info)
  2584. {
  2585. struct meta_hdr_s *mhdr;
  2586. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2587. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2588. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2589. if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
  2590. msdu_info->exception_fw = 0;
  2591. goto remove_meta_hdr;
  2592. }
  2593. msdu_info->exception_fw = 1;
  2594. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2595. meta_data->host_tx_desc_pool = 1;
  2596. meta_data->update_peer_cache = 1;
  2597. meta_data->learning_frame = 1;
  2598. if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
  2599. meta_data->power = mhdr->power;
  2600. meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
  2601. meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
  2602. meta_data->pream_type = mhdr->rate_info[0].preamble_type;
  2603. meta_data->retry_limit = mhdr->rate_info[0].max_tries;
  2604. meta_data->dyn_bw = 1;
  2605. meta_data->valid_pwr = 1;
  2606. meta_data->valid_mcs_mask = 1;
  2607. meta_data->valid_nss_mask = 1;
  2608. meta_data->valid_preamble_type = 1;
  2609. meta_data->valid_retries = 1;
  2610. meta_data->valid_bw_info = 1;
  2611. }
  2612. if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
  2613. meta_data->encrypt_type = 0;
  2614. meta_data->valid_encrypt_type = 1;
  2615. meta_data->learning_frame = 0;
  2616. }
  2617. meta_data->valid_key_flags = 1;
  2618. meta_data->key_flags = (mhdr->keyix & 0x3);
  2619. remove_meta_hdr:
  2620. if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2621. dp_tx_err("qdf_nbuf_pull_head failed");
  2622. qdf_nbuf_free(nbuf);
  2623. return NULL;
  2624. }
  2625. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  2626. dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
  2627. " tid %d to_fw %d",
  2628. msdu_info->meta_data[0],
  2629. msdu_info->meta_data[1],
  2630. msdu_info->meta_data[2],
  2631. msdu_info->meta_data[3],
  2632. msdu_info->meta_data[4],
  2633. msdu_info->meta_data[5],
  2634. msdu_info->tid, msdu_info->exception_fw);
  2635. return nbuf;
  2636. }
  2637. #else
  2638. static
  2639. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2640. struct dp_tx_msdu_info_s *msdu_info)
  2641. {
  2642. return nbuf;
  2643. }
  2644. #endif
  2645. /**
  2646. * dp_check_exc_metadata() - Checks if parameters are valid
  2647. * @tx_exc: holds all exception path parameters
  2648. *
  2649. * Return: true when all the parameters are valid else false
  2650. *
  2651. */
  2652. static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
  2653. {
  2654. bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
  2655. HTT_INVALID_TID);
  2656. bool invalid_encap_type =
  2657. (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
  2658. tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
  2659. bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
  2660. tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
  2661. bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
  2662. tx_exc->ppdu_cookie == 0);
  2663. if (tx_exc->is_intrabss_fwd)
  2664. return true;
  2665. if (invalid_tid || invalid_encap_type || invalid_sec_type ||
  2666. invalid_cookie) {
  2667. return false;
  2668. }
  2669. return true;
  2670. }
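/*
 * Illustrative usage sketch (hypothetical, compiled out unless the
 * documentation-only DP_TX_DOC_SKETCHES macro is defined): filling
 * cdp_tx_exception_metadata for a tx-sniffer frame so that it passes
 * dp_check_exc_metadata() above. Only the fields inspected by that
 * check are shown.
 */
#ifdef DP_TX_DOC_SKETCHES
static bool dp_doc_fill_sniffer_exc_metadata(uint16_t ppdu_cookie)
{
	struct cdp_tx_exception_metadata tx_exc = {0};

	tx_exc.tid = HTT_INVALID_TID;	/* no TID override */
	tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
	tx_exc.sec_type = CDP_INVALID_SEC_TYPE;
	tx_exc.peer_id = CDP_INVALID_PEER;
	tx_exc.is_tx_sniffer = 1;
	tx_exc.ppdu_cookie = ppdu_cookie;	/* must be non-zero */

	return dp_check_exc_metadata(&tx_exc);
}
#endif /* DP_TX_DOC_SKETCHES */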
  2671. #ifdef ATH_SUPPORT_IQUE
  2672. bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2673. {
  2674. qdf_ether_header_t *eh;
  2675. /* Mcast to Ucast Conversion*/
  2676. if (qdf_likely(!vdev->mcast_enhancement_en))
  2677. return true;
  2678. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2679. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
  2680. !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
  2681. dp_verbose_debug("Mcast frm for ME %pK", vdev);
  2682. qdf_nbuf_set_next(nbuf, NULL);
  2683. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
  2684. qdf_nbuf_len(nbuf));
  2685. if (dp_tx_prepare_send_me(vdev, nbuf) ==
  2686. QDF_STATUS_SUCCESS) {
  2687. return false;
  2688. }
  2689. if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
  2690. if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
  2691. QDF_STATUS_SUCCESS) {
  2692. return false;
  2693. }
  2694. }
  2695. }
  2696. return true;
  2697. }
  2698. #else
  2699. bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2700. {
  2701. return true;
  2702. }
  2703. #endif
  2704. #ifdef QCA_SUPPORT_WDS_EXTENDED
  2705. /**
  2706. * dp_tx_mcast_drop() - Drop mcast frame if drop_tx_mcast is set in WDS_EXT
  2707. * @vdev: vdev handle
  2708. * @nbuf: skb
  2709. *
  2710. * Return: true if frame is dropped, false otherwise
  2711. */
  2712. static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2713. {
  2714. /* Drop tx mcast and WDS Extended feature check */
  2715. if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
  2716. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  2717. qdf_nbuf_data(nbuf);
  2718. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
  2719. DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1);
  2720. return true;
  2721. }
  2722. }
  2723. return false;
  2724. }
  2725. #else
  2726. static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2727. {
  2728. return false;
  2729. }
  2730. #endif
  2731. /**
  2732. * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
  2733. * @nbuf: qdf_nbuf_t
  2734. * @vdev: struct dp_vdev *
  2735. *
  2736. * Allow packet for processing only if it is for peer client which is
  2737. * connected with same vap. Drop packet if client is connected to
  2738. * different vap.
  2739. *
  2740. * Return: QDF_STATUS
  2741. */
  2742. static inline QDF_STATUS
  2743. dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  2744. {
  2745. struct dp_ast_entry *dst_ast_entry = NULL;
  2746. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2747. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
  2748. DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
  2749. return QDF_STATUS_SUCCESS;
  2750. qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
  2751. dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
  2752. eh->ether_dhost,
  2753. vdev->vdev_id);
  2754. /* If there is no ast entry, return failure */
  2755. if (qdf_unlikely(!dst_ast_entry)) {
  2756. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2757. return QDF_STATUS_E_FAILURE;
  2758. }
  2759. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2760. return QDF_STATUS_SUCCESS;
  2761. }
  2762. /**
  2763. * dp_tx_nawds_handler() - NAWDS handler
  2764. *
  2765. * @soc: DP soc handle
  2766. * @vdev: DP vdev handle
  2767. * @msdu_info: msdu_info required to create HTT metadata
  2768. * @nbuf: skb
* @sa_peer_id: peer id of the source peer (from the AST lookup)
  2770. *
* This API transmits a copy of the multicast frame, with the
* peer id set, to each NAWDS-enabled peer.
  2773. *
  2774. * Return: none
  2775. */
  2776. void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
  2777. struct dp_tx_msdu_info_s *msdu_info,
  2778. qdf_nbuf_t nbuf, uint16_t sa_peer_id)
  2779. {
  2780. struct dp_peer *peer = NULL;
  2781. qdf_nbuf_t nbuf_clone = NULL;
  2782. uint16_t peer_id = DP_INVALID_PEER;
  2783. struct dp_txrx_peer *txrx_peer;
  2784. uint8_t link_id = 0;
/* This check avoids forwarding a packet whose source is
 * present in the AST table but does not yet have a valid
 * peer id.
 */
  2788. if (sa_peer_id == HTT_INVALID_PEER)
  2789. return;
  2790. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2791. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2792. txrx_peer = dp_get_txrx_peer(peer);
  2793. if (!txrx_peer)
  2794. continue;
  2795. if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
  2796. peer_id = peer->peer_id;
  2797. if (!dp_peer_is_primary_link_peer(peer))
  2798. continue;
/* In the case of a WDS ext peer, mcast traffic is
 * sent over the VLAN interface
 */
  2802. if (dp_peer_is_wds_ext_peer(txrx_peer))
  2803. continue;
/* Multicast packets need to be
 * dropped in case of intra-bss forwarding
 */
  2807. if (sa_peer_id == txrx_peer->peer_id) {
  2808. dp_tx_debug("multicast packet");
  2809. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  2810. tx.nawds_mcast_drop,
  2811. 1, link_id);
  2812. continue;
  2813. }
  2814. nbuf_clone = qdf_nbuf_clone(nbuf);
  2815. if (!nbuf_clone) {
  2816. QDF_TRACE(QDF_MODULE_ID_DP,
  2817. QDF_TRACE_LEVEL_ERROR,
  2818. FL("nbuf clone failed"));
  2819. break;
  2820. }
  2821. nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
  2822. msdu_info, peer_id,
  2823. NULL);
  2824. if (nbuf_clone) {
  2825. dp_tx_debug("pkt send failed");
  2826. qdf_nbuf_free(nbuf_clone);
  2827. } else {
  2828. if (peer_id != DP_INVALID_PEER)
  2829. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  2830. tx.nawds_mcast,
  2831. 1, qdf_nbuf_len(nbuf), link_id);
  2832. }
  2833. }
  2834. }
  2835. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2836. }
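/*
 * Illustrative sketch (hypothetical names, compiled out): the per-peer
 * filter that dp_tx_nawds_handler() above applies, collapsed into one
 * predicate. A clone of the mcast frame is unicast only to peers for
 * which this returns true.
 */
#ifdef DP_TX_DOC_SKETCHES
static inline bool dp_doc_nawds_should_send(bool bss_peer, bool nawds_enabled,
					    bool primary_link, bool wds_ext,
					    uint16_t peer_id,
					    uint16_t sa_peer_id)
{
	return !bss_peer && nawds_enabled && primary_link && !wds_ext &&
		peer_id != sa_peer_id;	/* don't echo back to the sender */
}
#endif /* DP_TX_DOC_SKETCHES */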
  2837. #ifdef WLAN_MCAST_MLO
  2838. static inline bool
  2839. dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
  2840. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2841. {
  2842. if (!tx_exc_metadata->is_mlo_mcast && qdf_unlikely(vdev->mesh_vdev))
  2843. return true;
  2844. return false;
  2845. }
  2846. #else
  2847. static inline bool
  2848. dp_tx_check_mesh_vdev(struct dp_vdev *vdev,
  2849. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2850. {
  2851. if (qdf_unlikely(vdev->mesh_vdev))
  2852. return true;
  2853. return false;
  2854. }
  2855. #endif
  2856. qdf_nbuf_t
  2857. dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2858. qdf_nbuf_t nbuf,
  2859. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2860. {
  2861. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2862. struct dp_tx_msdu_info_s msdu_info;
  2863. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2864. DP_MOD_ID_TX_EXCEPTION);
  2865. if (qdf_unlikely(!vdev))
  2866. goto fail;
  2867. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2868. if (!tx_exc_metadata)
  2869. goto fail;
  2870. msdu_info.tid = tx_exc_metadata->tid;
  2871. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2872. QDF_MAC_ADDR_REF(nbuf->data));
  2873. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2874. if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
  2875. dp_tx_err("Invalid parameters in exception path");
  2876. goto fail;
  2877. }
  2878. /* for peer based metadata check if peer is valid */
  2879. if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
  2880. struct dp_peer *peer = NULL;
  2881. peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
  2882. tx_exc_metadata->peer_id,
  2883. DP_MOD_ID_TX_EXCEPTION);
  2884. if (qdf_unlikely(!peer)) {
  2885. DP_STATS_INC(vdev,
  2886. tx_i.dropped.invalid_peer_id_in_exc_path,
  2887. 1);
  2888. goto fail;
  2889. }
  2890. dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
  2891. }
  2892. /* Basic sanity checks for unsupported packets */
  2893. /* MESH mode */
  2894. if (dp_tx_check_mesh_vdev(vdev, tx_exc_metadata)) {
  2895. dp_tx_err("Mesh mode is not supported in exception path");
  2896. goto fail;
  2897. }
  2898. /*
  2899. * Classify the frame and call corresponding
  2900. * "prepare" function which extracts the segment (TSO)
  2901. * and fragmentation information (for TSO , SG, ME, or Raw)
  2902. * into MSDU_INFO structure which is later used to fill
  2903. * SW and HW descriptors.
  2904. */
  2905. if (qdf_nbuf_is_tso(nbuf)) {
  2906. dp_verbose_debug("TSO frame %pK", vdev);
  2907. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2908. qdf_nbuf_len(nbuf));
  2909. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2910. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2911. qdf_nbuf_len(nbuf));
  2912. goto fail;
  2913. }
  2914. DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1);
  2915. goto send_multiple;
  2916. }
  2917. /* SG */
  2918. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2919. struct dp_tx_seg_info_s seg_info = {0};
  2920. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2921. if (!nbuf)
  2922. goto fail;
  2923. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2924. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2925. qdf_nbuf_len(nbuf));
  2926. goto send_multiple;
  2927. }
if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
  2929. DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
  2930. qdf_nbuf_len(nbuf));
  2931. dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
  2932. tx_exc_metadata->ppdu_cookie);
  2933. }
  2934. /*
  2935. * Get HW Queue to use for this frame.
* TCL supports up to 4 DMA rings, out of which 3 rings are
  2937. * dedicated for data and 1 for command.
  2938. * "queue_id" maps to one hardware ring.
  2939. * With each ring, we also associate a unique Tx descriptor pool
  2940. * to minimize lock contention for these resources.
  2941. */
  2942. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
/*
 * If the packet is a mcast packet, send it through the
 * mlo_mcast handler for all partner vdevs
 */
  2947. if (soc->arch_ops.dp_tx_mlo_mcast_send) {
  2948. nbuf = soc->arch_ops.dp_tx_mlo_mcast_send(soc, vdev,
  2949. nbuf,
  2950. tx_exc_metadata);
  2951. if (!nbuf)
  2952. goto fail;
  2953. }
if (qdf_unlikely(tx_exc_metadata->is_intrabss_fwd)) {
  2955. if (qdf_unlikely(vdev->nawds_enabled)) {
  2956. /*
  2957. * This is a multicast packet
  2958. */
  2959. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  2960. tx_exc_metadata->peer_id);
  2961. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  2962. 1, qdf_nbuf_len(nbuf));
  2963. }
  2964. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2965. DP_INVALID_PEER, NULL);
  2966. } else {
  2967. /*
  2968. * Check exception descriptors
  2969. */
  2970. if (dp_tx_exception_limit_check(vdev))
  2971. goto fail;
  2972. /* Single linear frame */
  2973. /*
  2974. * If nbuf is a simple linear frame, use send_single function to
  2975. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2976. * SRNG. There is no need to setup a MSDU extension descriptor.
  2977. */
  2978. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2979. tx_exc_metadata->peer_id,
  2980. tx_exc_metadata);
  2981. }
  2982. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2983. return nbuf;
  2984. send_multiple:
  2985. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2986. fail:
  2987. if (vdev)
  2988. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2989. dp_verbose_debug("pkt send failed");
  2990. return nbuf;
  2991. }
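/*
 * Illustrative sketch (hypothetical): one simple policy satisfying the
 * "queue_id maps to one hardware ring, one descriptor pool per ring"
 * contract described before the dp_tx_get_queue() calls above. The
 * real selection logic is target-specific.
 */
#ifdef DP_TX_DOC_SKETCHES
#define DP_DOC_NUM_DATA_RINGS 3	/* 3 of the 4 TCL rings carry data */

struct dp_doc_tx_queue {
	uint8_t ring_id;
	uint8_t desc_pool_id;
};

static inline void dp_doc_tx_get_queue(unsigned int cpu,
				       struct dp_doc_tx_queue *q)
{
	q->ring_id = cpu % DP_DOC_NUM_DATA_RINGS;
	/* pairing a pool with its ring minimizes lock contention */
	q->desc_pool_id = q->ring_id;
}
#endif /* DP_TX_DOC_SKETCHES */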
  2992. qdf_nbuf_t
  2993. dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2994. uint8_t vdev_id, qdf_nbuf_t nbuf,
  2995. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2996. {
  2997. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2998. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2999. DP_MOD_ID_TX_EXCEPTION);
  3000. if (qdf_unlikely(!vdev))
  3001. goto fail;
  3002. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  3003. == QDF_STATUS_E_FAILURE)) {
  3004. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  3005. goto fail;
  3006. }
/* Drop the vdev ref here; it is taken again inside dp_tx_send_exception */
  3008. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  3009. return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
  3010. fail:
  3011. if (vdev)
  3012. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  3013. dp_verbose_debug("pkt send failed");
  3014. return nbuf;
  3015. }
  3016. #ifdef MESH_MODE_SUPPORT
  3017. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3018. qdf_nbuf_t nbuf)
  3019. {
  3020. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3021. struct meta_hdr_s *mhdr;
  3022. qdf_nbuf_t nbuf_mesh = NULL;
  3023. qdf_nbuf_t nbuf_clone = NULL;
  3024. struct dp_vdev *vdev;
  3025. uint8_t no_enc_frame = 0;
  3026. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  3027. if (!nbuf_mesh) {
  3028. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3029. "qdf_nbuf_unshare failed");
  3030. return nbuf;
  3031. }
  3032. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  3033. if (!vdev) {
  3034. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3035. "vdev is NULL for vdev_id %d", vdev_id);
  3036. return nbuf;
  3037. }
  3038. nbuf = nbuf_mesh;
  3039. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  3040. if ((vdev->sec_type != cdp_sec_type_none) &&
  3041. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  3042. no_enc_frame = 1;
  3043. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  3044. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  3045. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  3046. !no_enc_frame) {
  3047. nbuf_clone = qdf_nbuf_clone(nbuf);
  3048. if (!nbuf_clone) {
  3049. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3050. "qdf_nbuf_clone failed");
  3051. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  3052. return nbuf;
  3053. }
  3054. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  3055. }
  3056. if (nbuf_clone) {
  3057. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  3058. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  3059. } else {
  3060. qdf_nbuf_free(nbuf_clone);
  3061. }
  3062. }
  3063. if (no_enc_frame)
  3064. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  3065. else
  3066. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  3067. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  3068. if ((!nbuf) && no_enc_frame) {
  3069. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  3070. }
  3071. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  3072. return nbuf;
  3073. }
  3074. #else
  3075. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3076. qdf_nbuf_t nbuf)
  3077. {
  3078. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  3079. }
  3080. #endif
  3081. #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
  3082. static inline
  3083. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  3084. {
  3085. if (nbuf) {
  3086. qdf_prefetch(&nbuf->len);
  3087. qdf_prefetch(&nbuf->data);
  3088. }
  3089. }
  3090. #else
  3091. static inline
  3092. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  3093. {
  3094. }
  3095. #endif
  3096. #ifdef DP_UMAC_HW_RESET_SUPPORT
  3097. qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3098. qdf_nbuf_t nbuf)
  3099. {
  3100. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3101. struct dp_vdev *vdev = NULL;
  3102. vdev = soc->vdev_id_map[vdev_id];
  3103. if (qdf_unlikely(!vdev))
  3104. return nbuf;
  3105. DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
  3106. return nbuf;
  3107. }
  3108. qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3109. qdf_nbuf_t nbuf,
  3110. struct cdp_tx_exception_metadata *tx_exc_metadata)
  3111. {
  3112. return dp_tx_drop(soc_hdl, vdev_id, nbuf);
  3113. }
  3114. #endif
  3115. #ifdef FEATURE_DIRECT_LINK
  3116. /**
  3117. * dp_vdev_tx_mark_to_fw() - Mark to_fw bit for the tx packet
  3118. * @nbuf: skb
  3119. * @vdev: DP vdev handle
  3120. *
  3121. * Return: None
  3122. */
  3123. static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  3124. {
  3125. if (qdf_unlikely(vdev->to_fw))
  3126. QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf) = 1;
  3127. }
  3128. #else
  3129. static inline void dp_vdev_tx_mark_to_fw(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  3130. {
  3131. }
  3132. #endif
  3133. qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3134. qdf_nbuf_t nbuf)
  3135. {
  3136. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3137. uint16_t peer_id = HTT_INVALID_PEER;
/*
 * A memzero causes additional function call overhead,
 * so rely on static stack initialization instead
 */
  3142. struct dp_tx_msdu_info_s msdu_info = {0};
  3143. struct dp_vdev *vdev = NULL;
  3144. qdf_nbuf_t end_nbuf = NULL;
  3145. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3146. return nbuf;
/*
 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
 * it in the per-packet path.
 *
 * In this path the vdev memory is already protected by the
 * netdev tx lock
 */
  3154. vdev = soc->vdev_id_map[vdev_id];
  3155. if (qdf_unlikely(!vdev))
  3156. return nbuf;
  3157. dp_vdev_tx_mark_to_fw(nbuf, vdev);
  3158. /*
  3159. * Set Default Host TID value to invalid TID
  3160. * (TID override disabled)
  3161. */
  3162. msdu_info.tid = HTT_TX_EXT_TID_INVALID;
  3163. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  3164. if (qdf_unlikely(vdev->mesh_vdev)) {
  3165. qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
  3166. &msdu_info);
  3167. if (!nbuf_mesh) {
  3168. dp_verbose_debug("Extracting mesh metadata failed");
  3169. return nbuf;
  3170. }
  3171. nbuf = nbuf_mesh;
  3172. }
  3173. /*
  3174. * Get HW Queue to use for this frame.
* TCL supports up to 4 DMA rings, out of which 3 rings are
  3176. * dedicated for data and 1 for command.
  3177. * "queue_id" maps to one hardware ring.
  3178. * With each ring, we also associate a unique Tx descriptor pool
  3179. * to minimize lock contention for these resources.
  3180. */
  3181. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  3182. DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
  3183. 1);
  3184. /*
  3185. * TCL H/W supports 2 DSCP-TID mapping tables.
  3186. * Table 1 - Default DSCP-TID mapping table
  3187. * Table 2 - 1 DSCP-TID override table
  3188. *
  3189. * If we need a different DSCP-TID mapping for this vap,
  3190. * call tid_classify to extract DSCP/ToS from frame and
  3191. * map to a TID and store in msdu_info. This is later used
  3192. * to fill in TCL Input descriptor (per-packet TID override).
  3193. */
  3194. dp_tx_classify_tid(vdev, nbuf, &msdu_info);
  3195. /*
  3196. * Classify the frame and call corresponding
  3197. * "prepare" function which extracts the segment (TSO)
  3198. * and fragmentation information (for TSO , SG, ME, or Raw)
  3199. * into MSDU_INFO structure which is later used to fill
  3200. * SW and HW descriptors.
  3201. */
  3202. if (qdf_nbuf_is_tso(nbuf)) {
  3203. dp_verbose_debug("TSO frame %pK", vdev);
  3204. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  3205. qdf_nbuf_len(nbuf));
  3206. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  3207. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  3208. qdf_nbuf_len(nbuf));
  3209. return nbuf;
  3210. }
  3211. DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1);
  3212. goto send_multiple;
  3213. }
  3214. /* SG */
  3215. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  3216. if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
  3217. if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
  3218. return nbuf;
  3219. } else {
  3220. struct dp_tx_seg_info_s seg_info = {0};
  3221. if (qdf_unlikely(is_nbuf_frm_rmnet(nbuf, &msdu_info)))
  3222. goto send_single;
  3223. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
  3224. &msdu_info);
  3225. if (!nbuf)
  3226. return NULL;
  3227. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  3228. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  3229. qdf_nbuf_len(nbuf));
  3230. goto send_multiple;
  3231. }
  3232. }
  3233. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  3234. return NULL;
  3235. if (qdf_unlikely(dp_tx_mcast_drop(vdev, nbuf)))
  3236. return nbuf;
  3237. /* RAW */
  3238. if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
  3239. struct dp_tx_seg_info_s seg_info = {0};
  3240. nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
  3241. if (!nbuf)
  3242. return NULL;
  3243. dp_verbose_debug("Raw frame %pK", vdev);
  3244. goto send_multiple;
  3245. }
  3246. if (qdf_unlikely(vdev->nawds_enabled)) {
  3247. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  3248. qdf_nbuf_data(nbuf);
  3249. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
  3250. uint16_t sa_peer_id = DP_INVALID_PEER;
  3251. if (!soc->ast_offload_support) {
  3252. struct dp_ast_entry *ast_entry = NULL;
  3253. qdf_spin_lock_bh(&soc->ast_lock);
  3254. ast_entry = dp_peer_ast_hash_find_by_pdevid
  3255. (soc,
  3256. (uint8_t *)(eh->ether_shost),
  3257. vdev->pdev->pdev_id);
  3258. if (ast_entry)
  3259. sa_peer_id = ast_entry->peer_id;
  3260. qdf_spin_unlock_bh(&soc->ast_lock);
  3261. }
  3262. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  3263. sa_peer_id);
  3264. }
  3265. peer_id = DP_INVALID_PEER;
  3266. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  3267. 1, qdf_nbuf_len(nbuf));
  3268. }
  3269. send_single:
  3270. /* Single linear frame */
  3271. /*
  3272. * If nbuf is a simple linear frame, use send_single function to
  3273. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  3274. * SRNG. There is no need to setup a MSDU extension descriptor.
  3275. */
  3276. dp_tx_prefetch_nbuf_data(nbuf);
  3277. nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
  3278. peer_id, end_nbuf);
  3279. return nbuf;
  3280. send_multiple:
  3281. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  3282. if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
  3283. dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
  3284. return nbuf;
  3285. }
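/*
 * Illustrative sketch (hypothetical): the shape of the DSCP-to-TID
 * override lookup that dp_tx_classify_tid() performs in dp_tx_send()
 * above. The real driver keeps the map per pdev/vdev and programs the
 * TCL override table; a flat array stands in for it here.
 */
#ifdef DP_TX_DOC_SKETCHES
#define DP_DOC_DSCP_MAX 64

static inline uint8_t dp_doc_dscp_to_tid(const uint8_t *dscp_tid_map,
					 uint8_t tos)
{
	uint8_t dscp = tos >> 2;	/* DSCP is the top 6 bits of ToS */

	return dscp_tid_map[dscp % DP_DOC_DSCP_MAX];
}
#endif /* DP_TX_DOC_SKETCHES */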
  3286. qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
  3287. uint8_t vdev_id, qdf_nbuf_t nbuf)
  3288. {
  3289. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3290. struct dp_vdev *vdev = NULL;
  3291. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3292. return nbuf;
/*
 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
 * it in the per-packet path.
 *
 * In this path the vdev memory is already protected by the
 * netdev tx lock
 */
  3300. vdev = soc->vdev_id_map[vdev_id];
  3301. if (qdf_unlikely(!vdev))
  3302. return nbuf;
  3303. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  3304. == QDF_STATUS_E_FAILURE)) {
  3305. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  3306. return nbuf;
  3307. }
  3308. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  3309. }
  3310. #ifdef UMAC_SUPPORT_PROXY_ARP
  3311. /**
  3312. * dp_tx_proxy_arp() - Tx proxy arp handler
  3313. * @vdev: datapath vdev handle
  3314. * @nbuf: sk buffer
  3315. *
  3316. * Return: status
  3317. */
  3318. static inline
  3319. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3320. {
  3321. if (vdev->osif_proxy_arp)
  3322. return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
/*
 * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
 * osif_proxy_arp to have a valid function pointer
 * assigned to it
 */
  3328. dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
  3329. return QDF_STATUS_NOT_INITIALIZED;
  3330. }
  3331. #else
  3332. static inline
  3333. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3334. {
  3335. return QDF_STATUS_SUCCESS;
  3336. }
  3337. #endif
  3338. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
  3339. !defined(CONFIG_MLO_SINGLE_DEV)
  3340. #ifdef WLAN_MCAST_MLO
  3341. static bool
  3342. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3343. struct dp_tx_desc_s *tx_desc,
  3344. qdf_nbuf_t nbuf,
  3345. uint8_t reinject_reason)
  3346. {
  3347. if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
  3348. if (soc->arch_ops.dp_tx_mcast_handler)
  3349. soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
  3350. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3351. return true;
  3352. }
  3353. return false;
  3354. }
  3355. #else /* WLAN_MCAST_MLO */
  3356. static inline bool
  3357. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3358. struct dp_tx_desc_s *tx_desc,
  3359. qdf_nbuf_t nbuf,
  3360. uint8_t reinject_reason)
  3361. {
  3362. return false;
  3363. }
  3364. #endif /* WLAN_MCAST_MLO */
  3365. #else
  3366. static inline bool
  3367. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3368. struct dp_tx_desc_s *tx_desc,
  3369. qdf_nbuf_t nbuf,
  3370. uint8_t reinject_reason)
  3371. {
  3372. return false;
  3373. }
  3374. #endif
  3375. void dp_tx_reinject_handler(struct dp_soc *soc,
  3376. struct dp_vdev *vdev,
  3377. struct dp_tx_desc_s *tx_desc,
  3378. uint8_t *status,
  3379. uint8_t reinject_reason)
  3380. {
  3381. struct dp_peer *peer = NULL;
  3382. uint32_t peer_id = HTT_INVALID_PEER;
  3383. qdf_nbuf_t nbuf = tx_desc->nbuf;
  3384. qdf_nbuf_t nbuf_copy = NULL;
  3385. struct dp_tx_msdu_info_s msdu_info;
  3386. #ifdef WDS_VENDOR_EXTENSION
  3387. int is_mcast = 0, is_ucast = 0;
  3388. int num_peers_3addr = 0;
  3389. qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
  3390. struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
  3391. #endif
  3392. struct dp_txrx_peer *txrx_peer;
  3393. qdf_assert(vdev);
  3394. dp_tx_debug("Tx reinject path");
  3395. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  3396. qdf_nbuf_len(tx_desc->nbuf));
  3397. if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
  3398. return;
  3399. #ifdef WDS_VENDOR_EXTENSION
  3400. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  3401. is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
  3402. } else {
  3403. is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
  3404. }
  3405. is_ucast = !is_mcast;
  3406. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3407. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3408. txrx_peer = dp_get_txrx_peer(peer);
  3409. if (!txrx_peer || txrx_peer->bss_peer)
  3410. continue;
/* Detect wds peers that use 3-addr framing for mcast.
 * if there are any, the bss_peer is used to send the
 * mcast frame using 3-addr format. all wds enabled
 * peers that use 4-addr framing for mcast frames will
 * be duplicated and sent as 4-addr frames below.
 */
  3417. if (!txrx_peer->wds_enabled ||
  3418. !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
  3419. num_peers_3addr = 1;
  3420. break;
  3421. }
  3422. }
  3423. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3424. #endif
  3425. if (qdf_unlikely(vdev->mesh_vdev)) {
  3426. DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
  3427. } else {
  3428. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3429. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3430. txrx_peer = dp_get_txrx_peer(peer);
  3431. if (!txrx_peer)
  3432. continue;
  3433. if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
  3434. #ifdef WDS_VENDOR_EXTENSION
  3435. /*
  3436. * . if 3-addr STA, then send on BSS Peer
  3437. * . if Peer WDS enabled and accept 4-addr mcast,
  3438. * send mcast on that peer only
  3439. * . if Peer WDS enabled and accept 4-addr ucast,
  3440. * send ucast on that peer only
  3441. */
  3442. ((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
  3443. (txrx_peer->wds_enabled &&
  3444. ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
  3445. (is_ucast &&
  3446. txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
  3447. #else
  3448. (txrx_peer->bss_peer &&
  3449. (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
  3450. #endif
  3451. peer_id = DP_INVALID_PEER;
  3452. nbuf_copy = qdf_nbuf_copy(nbuf);
  3453. if (!nbuf_copy) {
  3454. dp_tx_debug("nbuf copy failed");
  3455. break;
  3456. }
  3457. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  3458. dp_tx_get_queue(vdev, nbuf,
  3459. &msdu_info.tx_queue);
  3460. nbuf_copy = dp_tx_send_msdu_single(vdev,
  3461. nbuf_copy,
  3462. &msdu_info,
  3463. peer_id,
  3464. NULL);
  3465. if (nbuf_copy) {
  3466. dp_tx_debug("pkt send failed");
  3467. qdf_nbuf_free(nbuf_copy);
  3468. }
  3469. }
  3470. }
  3471. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3472. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  3473. QDF_DMA_TO_DEVICE, nbuf->len);
  3474. qdf_nbuf_free(nbuf);
  3475. }
  3476. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3477. }
  3478. void dp_tx_inspect_handler(struct dp_soc *soc,
  3479. struct dp_vdev *vdev,
  3480. struct dp_tx_desc_s *tx_desc,
  3481. uint8_t *status)
  3482. {
  3483. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3484. "%s Tx inspect path",
  3485. __func__);
  3486. DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
  3487. qdf_nbuf_len(tx_desc->nbuf));
  3488. DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
  3489. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3490. }
  3491. #ifdef MESH_MODE_SUPPORT
  3492. /**
  3493. * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
  3494. * in mesh meta header
  3495. * @tx_desc: software descriptor head pointer
  3496. * @ts: pointer to tx completion stats
  3497. * Return: none
  3498. */
  3499. static
  3500. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3501. struct hal_tx_completion_status *ts)
  3502. {
  3503. qdf_nbuf_t netbuf = tx_desc->nbuf;
  3504. if (!tx_desc->msdu_ext_desc) {
  3505. if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
  3506. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3507. "netbuf %pK offset %d",
  3508. netbuf, tx_desc->pkt_offset);
  3509. return;
  3510. }
  3511. }
  3512. }
  3513. #else
  3514. static
  3515. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3516. struct hal_tx_completion_status *ts)
  3517. {
  3518. }
  3519. #endif
  3520. #ifdef CONFIG_SAWF
  3521. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3522. struct dp_vdev *vdev,
  3523. struct dp_txrx_peer *txrx_peer,
  3524. struct dp_tx_desc_s *tx_desc,
  3525. struct hal_tx_completion_status *ts,
  3526. uint8_t tid)
  3527. {
  3528. dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
  3529. ts, tid);
  3530. }
  3531. static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3532. uint32_t nw_delay,
  3533. uint32_t sw_delay,
  3534. uint32_t hw_delay)
  3535. {
  3536. dp_peer_tid_delay_avg(tx_delay,
  3537. nw_delay,
  3538. sw_delay,
  3539. hw_delay);
  3540. }
  3541. #else
  3542. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3543. struct dp_vdev *vdev,
  3544. struct dp_txrx_peer *txrx_peer,
  3545. struct dp_tx_desc_s *tx_desc,
  3546. struct hal_tx_completion_status *ts,
  3547. uint8_t tid)
  3548. {
  3549. }
  3550. static inline void
  3551. dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3552. uint32_t nw_delay, uint32_t sw_delay,
  3553. uint32_t hw_delay)
  3554. {
  3555. }
  3556. #endif
  3557. #ifdef QCA_PEER_EXT_STATS
  3558. #ifdef WLAN_CONFIG_TX_DELAY
  3559. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3560. struct dp_tx_desc_s *tx_desc,
  3561. struct hal_tx_completion_status *ts,
  3562. struct dp_vdev *vdev)
  3563. {
  3564. struct dp_soc *soc = vdev->pdev->soc;
  3565. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3566. int64_t timestamp_ingress, timestamp_hw_enqueue;
  3567. uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
  3568. if (!ts->valid)
  3569. return;
  3570. timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
  3571. timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
  3572. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3573. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3574. if (soc->arch_ops.dp_tx_compute_hw_delay)
  3575. if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
  3576. &fwhw_transmit_delay))
  3577. dp_hist_update_stats(&tx_delay->hwtx_delay,
  3578. fwhw_transmit_delay);
  3579. dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
  3580. fwhw_transmit_delay);
  3581. }
  3582. #else
  3583. /**
  3584. * dp_tx_compute_tid_delay() - Compute per TID delay
  3585. * @stats: Per TID delay stats
  3586. * @tx_desc: Software Tx descriptor
  3587. * @ts: Tx completion status
  3588. * @vdev: vdev
  3589. *
  3590. * Compute the software enqueue and hw enqueue delays and
  3591. * update the respective histograms
  3592. *
  3593. * Return: void
  3594. */
  3595. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3596. struct dp_tx_desc_s *tx_desc,
  3597. struct hal_tx_completion_status *ts,
  3598. struct dp_vdev *vdev)
  3599. {
  3600. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3601. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3602. uint32_t sw_enqueue_delay, fwhw_transmit_delay;
  3603. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3604. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3605. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3606. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3607. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3608. timestamp_hw_enqueue);
/*
 * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
 */
  3612. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3613. dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
  3614. }
  3615. #endif
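/*
 * Worked example (illustrative only) for the delay bookkeeping above,
 * with made-up millisecond timestamps:
 *   ingress (hardstart)   t0 = 100
 *   hw enqueue (tx_desc)  t1 = 103  ->  sw_enqueue_delay    = 3 ms
 *   completion (now)      t2 = 110  ->  fwhw_transmit_delay = 7 ms
 * The dp_doc_* name and DP_TX_DOC_SKETCHES guard are hypothetical.
 */
#ifdef DP_TX_DOC_SKETCHES
static inline void dp_doc_tid_delay_example(int64_t t0, int64_t t1, int64_t t2,
					    uint32_t *sw_delay,
					    uint32_t *fwhw_delay)
{
	*sw_delay = (uint32_t)(t1 - t0);	/* host queuing time */
	*fwhw_delay = (uint32_t)(t2 - t1);	/* FW + air time */
}
#endif /* DP_TX_DOC_SKETCHES */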
  3616. /**
  3617. * dp_tx_update_peer_delay_stats() - Update the peer delay stats
  3618. * @txrx_peer: DP peer context
  3619. * @tx_desc: Tx software descriptor
  3620. * @ts: Tx completion status
  3621. * @ring_id: Rx CPU context ID/CPU_ID
  3622. *
  3623. * Update the peer extended stats. These are enhanced other
  3624. * delay stats per msdu level.
  3625. *
  3626. * Return: void
  3627. */
  3628. static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3629. struct dp_tx_desc_s *tx_desc,
  3630. struct hal_tx_completion_status *ts,
  3631. uint8_t ring_id)
  3632. {
  3633. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3634. struct dp_soc *soc = NULL;
  3635. struct dp_peer_delay_stats *delay_stats = NULL;
  3636. uint8_t tid;
  3637. soc = pdev->soc;
  3638. if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
  3639. return;
  3640. if (!txrx_peer->delay_stats)
  3641. return;
  3642. tid = ts->tid;
  3643. delay_stats = txrx_peer->delay_stats;
qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3645. /*
  3646. * For non-TID packets use the TID 9
  3647. */
  3648. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3649. tid = CDP_MAX_DATA_TIDS - 1;
  3650. dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
  3651. tx_desc, ts, txrx_peer->vdev);
  3652. }
  3653. #else
  3654. static inline
  3655. void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3656. struct dp_tx_desc_s *tx_desc,
  3657. struct hal_tx_completion_status *ts,
  3658. uint8_t ring_id)
  3659. {
  3660. }
  3661. #endif
  3662. #ifdef WLAN_PEER_JITTER
  3663. /**
  3664. * dp_tx_jitter_get_avg_jitter() - compute the average jitter
  3665. * @curr_delay: Current delay
  3666. * @prev_delay: Previous delay
  3667. * @avg_jitter: Average Jitter
  3668. * Return: Newly Computed Average Jitter
  3669. */
  3670. static uint32_t dp_tx_jitter_get_avg_jitter(uint32_t curr_delay,
  3671. uint32_t prev_delay,
  3672. uint32_t avg_jitter)
  3673. {
  3674. uint32_t curr_jitter;
  3675. int32_t jitter_diff;
  3676. curr_jitter = qdf_abs(curr_delay - prev_delay);
  3677. if (!avg_jitter)
  3678. return curr_jitter;
  3679. jitter_diff = curr_jitter - avg_jitter;
  3680. if (jitter_diff < 0)
  3681. avg_jitter = avg_jitter -
  3682. (qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
  3683. else
  3684. avg_jitter = avg_jitter +
  3685. (qdf_abs(jitter_diff) >> DP_AVG_JITTER_WEIGHT_DENOM);
  3686. return avg_jitter;
  3687. }
  3688. /**
  3689. * dp_tx_jitter_get_avg_delay() - compute the average delay
  3690. * @curr_delay: Current delay
  3691. * @avg_delay: Average delay
  3692. * Return: Newly Computed Average Delay
  3693. */
  3694. static uint32_t dp_tx_jitter_get_avg_delay(uint32_t curr_delay,
  3695. uint32_t avg_delay)
  3696. {
  3697. int32_t delay_diff;
  3698. if (!avg_delay)
  3699. return curr_delay;
  3700. delay_diff = curr_delay - avg_delay;
  3701. if (delay_diff < 0)
  3702. avg_delay = avg_delay - (qdf_abs(delay_diff) >>
  3703. DP_AVG_DELAY_WEIGHT_DENOM);
  3704. else
  3705. avg_delay = avg_delay + (qdf_abs(delay_diff) >>
  3706. DP_AVG_DELAY_WEIGHT_DENOM);
  3707. return avg_delay;
  3708. }
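/*
 * Both helpers above implement the same exponential moving average,
 *   avg += (sample - avg) >> WEIGHT_DENOM
 * so with a denom of 3, for instance, each new sample contributes 1/8
 * of its deviation. Illustrative standalone form (hypothetical names,
 * compiled out):
 */
#ifdef DP_TX_DOC_SKETCHES
static inline uint32_t dp_doc_ewma_update(uint32_t avg, uint32_t sample,
					  unsigned int weight_denom)
{
	int32_t diff;

	if (!avg)	/* seed the average with the first sample */
		return sample;

	diff = (int32_t)(sample - avg);
	if (diff < 0)
		return avg - ((uint32_t)(-diff) >> weight_denom);
	return avg + ((uint32_t)diff >> weight_denom);
}
#endif /* DP_TX_DOC_SKETCHES */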
  3709. #ifdef WLAN_CONFIG_TX_DELAY
  3710. /**
  3711. * dp_tx_compute_cur_delay() - get the current delay
  3712. * @soc: soc handle
  3713. * @vdev: vdev structure for data path state
  3714. * @ts: Tx completion status
  3715. * @curr_delay: current delay
  3716. * @tx_desc: tx descriptor
  3717. * Return: void
  3718. */
  3719. static
  3720. QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
  3721. struct dp_vdev *vdev,
  3722. struct hal_tx_completion_status *ts,
  3723. uint32_t *curr_delay,
  3724. struct dp_tx_desc_s *tx_desc)
  3725. {
  3726. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  3727. if (soc->arch_ops.dp_tx_compute_hw_delay)
  3728. status = soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
  3729. curr_delay);
  3730. return status;
  3731. }
  3732. #else
  3733. static
  3734. QDF_STATUS dp_tx_compute_cur_delay(struct dp_soc *soc,
  3735. struct dp_vdev *vdev,
  3736. struct hal_tx_completion_status *ts,
  3737. uint32_t *curr_delay,
  3738. struct dp_tx_desc_s *tx_desc)
  3739. {
  3740. int64_t current_timestamp, timestamp_hw_enqueue;
  3741. current_timestamp = qdf_ktime_to_us(qdf_ktime_real_get());
  3742. timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
  3743. *curr_delay = (uint32_t)(current_timestamp - timestamp_hw_enqueue);
  3744. return QDF_STATUS_SUCCESS;
  3745. }
  3746. #endif
  3747. /**
  3748. * dp_tx_compute_tid_jitter() - compute per tid per ring jitter
  3749. * @jitter: per tid per ring jitter stats
  3750. * @ts: Tx completion status
  3751. * @vdev: vdev structure for data path state
  3752. * @tx_desc: tx descriptor
  3753. * Return: void
  3754. */
  3755. static void dp_tx_compute_tid_jitter(struct cdp_peer_tid_stats *jitter,
  3756. struct hal_tx_completion_status *ts,
  3757. struct dp_vdev *vdev,
  3758. struct dp_tx_desc_s *tx_desc)
  3759. {
  3760. uint32_t curr_delay, avg_delay, avg_jitter, prev_delay;
  3761. struct dp_soc *soc = vdev->pdev->soc;
  3762. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  3763. if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
  3764. jitter->tx_drop += 1;
  3765. return;
  3766. }
  3767. status = dp_tx_compute_cur_delay(soc, vdev, ts, &curr_delay,
  3768. tx_desc);
  3769. if (QDF_IS_STATUS_SUCCESS(status)) {
  3770. avg_delay = jitter->tx_avg_delay;
  3771. avg_jitter = jitter->tx_avg_jitter;
  3772. prev_delay = jitter->tx_prev_delay;
  3773. avg_jitter = dp_tx_jitter_get_avg_jitter(curr_delay,
  3774. prev_delay,
  3775. avg_jitter);
  3776. avg_delay = dp_tx_jitter_get_avg_delay(curr_delay, avg_delay);
  3777. jitter->tx_avg_delay = avg_delay;
  3778. jitter->tx_avg_jitter = avg_jitter;
  3779. jitter->tx_prev_delay = curr_delay;
  3780. jitter->tx_total_success += 1;
  3781. } else if (status == QDF_STATUS_E_FAILURE) {
  3782. jitter->tx_avg_err += 1;
  3783. }
  3784. }
/**
 * dp_tx_update_peer_jitter_stats() - Update the peer jitter stats
  3786. * @txrx_peer: DP peer context
  3787. * @tx_desc: Tx software descriptor
  3788. * @ts: Tx completion status
  3789. * @ring_id: Rx CPU context ID/CPU_ID
  3790. * Return: void
  3791. */
  3792. static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
  3793. struct dp_tx_desc_s *tx_desc,
  3794. struct hal_tx_completion_status *ts,
  3795. uint8_t ring_id)
  3796. {
  3797. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3798. struct dp_soc *soc = pdev->soc;
  3799. struct cdp_peer_tid_stats *jitter_stats = NULL;
  3800. uint8_t tid;
  3801. struct cdp_peer_tid_stats *rx_tid = NULL;
  3802. if (qdf_likely(!wlan_cfg_is_peer_jitter_stats_enabled(soc->wlan_cfg_ctx)))
  3803. return;
  3804. tid = ts->tid;
  3805. jitter_stats = txrx_peer->jitter_stats;
  3806. qdf_assert_always(jitter_stats);
qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3808. /*
  3809. * For non-TID packets use the TID 9
  3810. */
  3811. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3812. tid = CDP_MAX_DATA_TIDS - 1;
  3813. rx_tid = &jitter_stats[tid * CDP_MAX_TXRX_CTX + ring_id];
  3814. dp_tx_compute_tid_jitter(rx_tid,
  3815. ts, txrx_peer->vdev, tx_desc);
  3816. }
  3817. #else
  3818. static void dp_tx_update_peer_jitter_stats(struct dp_txrx_peer *txrx_peer,
  3819. struct dp_tx_desc_s *tx_desc,
  3820. struct hal_tx_completion_status *ts,
  3821. uint8_t ring_id)
  3822. {
  3823. }
  3824. #endif
  3825. #ifdef HW_TX_DELAY_STATS_ENABLE
  3826. /**
  3827. * dp_update_tx_delay_stats() - update the delay stats
  3828. * @vdev: vdev handle
  3829. * @delay: delay in ms or us based on the flag delay_in_us
  3830. * @tid: tid value
  3831. * @mode: type of tx delay mode
  3832. * @ring_id: ring number
  3833. * @delay_in_us: flag to indicate whether the delay is in ms or us
  3834. *
  3835. * Return: none
  3836. */
  3837. static inline
  3838. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3839. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3840. {
  3841. struct cdp_tid_tx_stats *tstats =
  3842. &vdev->stats.tid_tx_stats[ring_id][tid];
  3843. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3844. delay_in_us);
  3845. }
  3846. #else
  3847. static inline
  3848. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3849. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3850. {
  3851. struct cdp_tid_tx_stats *tstats =
  3852. &vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3853. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3854. delay_in_us);
  3855. }
  3856. #endif
  3857. void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
  3858. uint8_t tid, uint8_t ring_id)
  3859. {
  3860. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3861. uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
  3862. uint32_t fwhw_transmit_delay_us;
  3863. if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
  3864. qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
  3865. return;
  3866. if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
  3867. fwhw_transmit_delay_us =
  3868. qdf_ktime_to_us(qdf_ktime_real_get()) -
  3869. qdf_ktime_to_us(tx_desc->timestamp);
  3870. /*
  3871. * Delay between packet enqueued to HW and Tx completion in us
  3872. */
  3873. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
  3874. CDP_DELAY_STATS_FW_HW_TRANSMIT,
  3875. ring_id, true);
  3876. /*
  3877. * For MCL, only enqueue to completion delay is required
  3878. * so return if the vdev flag is enabled.
  3879. */
  3880. return;
  3881. }
  3882. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3883. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3884. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3885. timestamp_hw_enqueue);
  3886. if (!timestamp_hw_enqueue)
  3887. return;
  3888. /*
  3889. * Delay between packet enqueued to HW and Tx completion in ms
  3890. */
  3891. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
  3892. CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
  3893. false);
  3894. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3895. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3896. interframe_delay = (uint32_t)(timestamp_ingress -
  3897. vdev->prev_tx_enq_tstamp);
  3898. /*
  3899. * Delay in software enqueue
  3900. */
  3901. dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
  3902. CDP_DELAY_STATS_SW_ENQ, ring_id,
  3903. false);
  3904. /*
  3905. * Update interframe delay stats calculated at hardstart receive point.
* Value of vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so
* the interframe delay will not be calculated correctly for it.
* On the other hand, this avoids an extra per-packet check
  3909. * of !vdev->prev_tx_enq_tstamp.
  3910. */
  3911. dp_update_tx_delay_stats(vdev, interframe_delay, tid,
  3912. CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
  3913. false);
  3914. vdev->prev_tx_enq_tstamp = timestamp_ingress;
  3915. }
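/*
 * Worked example (illustrative only) for the interframe delay above:
 * with ingress timestamps of 100 ms, 104 ms and 109 ms for three
 * consecutive frames, the computed delay is meaningless for the 1st
 * frame (prev tstamp is 0), then 4 ms and 5 ms. Hypothetical
 * standalone form:
 */
#ifdef DP_TX_DOC_SKETCHES
static inline uint32_t dp_doc_interframe_delay(int64_t ingress_ts,
					       int64_t *prev_enq_tstamp)
{
	uint32_t delay = (uint32_t)(ingress_ts - *prev_enq_tstamp);

	*prev_enq_tstamp = ingress_ts;	/* remember for the next frame */
	return delay;
}
#endif /* DP_TX_DOC_SKETCHES */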
  3916. #ifdef DISABLE_DP_STATS
  3917. static
  3918. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
  3919. struct dp_txrx_peer *txrx_peer,
  3920. uint8_t link_id)
  3921. {
  3922. }
  3923. #else
  3924. static inline void
  3925. dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
  3926. uint8_t link_id)
  3927. {
  3928. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  3929. DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
  3930. if (subtype != QDF_PROTO_INVALID)
  3931. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
  3932. 1, link_id);
  3933. }
  3934. #endif
  3935. #ifndef QCA_ENHANCED_STATS_SUPPORT
  3936. #ifdef DP_PEER_EXTENDED_API
  3937. static inline uint8_t
  3938. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  3939. {
  3940. return txrx_peer->mpdu_retry_threshold;
  3941. }
  3942. #else
  3943. static inline uint8_t
  3944. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  3945. {
  3946. return 0;
  3947. }
  3948. #endif
  3949. /**
  3950. * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
  3951. *
* @ts: Tx completion status
  3953. * @txrx_peer: datapath txrx_peer handle
  3954. * @link_id: Link id
  3955. *
  3956. * Return: void
  3957. */
  3958. static inline void
  3959. dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
  3960. struct dp_txrx_peer *txrx_peer, uint8_t link_id)
  3961. {
  3962. uint8_t mcs, pkt_type, dst_mcs_idx;
  3963. uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
  3964. mcs = ts->mcs;
  3965. pkt_type = ts->pkt_type;
  3966. /* do HW to SW pkt type conversion */
  3967. pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
  3968. hal_2_dp_pkt_type_map[pkt_type]);
  3969. dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
  3970. if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
  3971. DP_PEER_EXTD_STATS_INC(txrx_peer,
  3972. tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
  3973. 1, link_id);
  3974. DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1, link_id);
  3975. DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1, link_id);
  3976. DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi,
  3977. link_id);
  3978. DP_PEER_EXTD_STATS_INC(txrx_peer,
  3979. tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1,
  3980. link_id);
  3981. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc, link_id);
  3982. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc, link_id);
  3983. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1,
  3984. link_id);
  3985. if (ts->first_msdu) {
  3986. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
  3987. ts->transmit_cnt > 1, link_id);
  3988. if (!retry_threshold)
  3989. return;
  3990. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
  3991. qdf_do_div(ts->transmit_cnt,
  3992. retry_threshold),
  3993. ts->transmit_cnt > retry_threshold,
  3994. link_id);
  3995. }
  3996. }
  3997. #else
  3998. static inline void
  3999. dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
  4000. struct dp_txrx_peer *txrx_peer, uint8_t link_id)
  4001. {
  4002. }
  4003. #endif
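/*
 * Worked example (illustrative only) for the retry accounting above:
 * with retry_threshold = 3 and transmit_cnt = 7, the condition 7 > 3
 * holds and tx.mpdu_success_with_retries grows by 7 / 3 = 2.
 * Hypothetical standalone form of that increment:
 */
#ifdef DP_TX_DOC_SKETCHES
static inline uint32_t dp_doc_retry_bucket_inc(uint32_t transmit_cnt,
					       uint8_t retry_threshold)
{
	if (!retry_threshold || transmit_cnt <= retry_threshold)
		return 0;
	return transmit_cnt / retry_threshold;
}
#endif /* DP_TX_DOC_SKETCHES */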
#if defined(WLAN_FEATURE_11BE_MLO) && defined(QCA_ENHANCED_STATS_SUPPORT)
static inline uint8_t
dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
			       struct hal_tx_completion_status *ts,
			       struct dp_txrx_peer *txrx_peer,
			       struct dp_vdev *vdev)
{
	uint8_t hw_link_id = 0;
	uint32_t ppdu_id;
	uint8_t link_id_offset, link_id_bits;

	if (!txrx_peer->is_mld_peer || !vdev->pdev->link_peer_stats)
		return 0;

	link_id_offset = soc->link_id_offset;
	link_id_bits = soc->link_id_bits;
	ppdu_id = ts->ppdu_id;
	hw_link_id = ((DP_GET_HW_LINK_ID_FRM_PPDU_ID(ppdu_id, link_id_offset,
						     link_id_bits)) + 1);
	if (hw_link_id > DP_MAX_MLO_LINKS) {
		hw_link_id = 0;
		DP_PEER_PER_PKT_STATS_INC(
				txrx_peer,
				tx.inval_link_id_pkt_cnt, 1, hw_link_id);
	}

	return hw_link_id;
}
#else
static inline uint8_t
dp_tx_get_link_id_from_ppdu_id(struct dp_soc *soc,
			       struct hal_tx_completion_status *ts,
			       struct dp_txrx_peer *txrx_peer,
			       struct dp_vdev *vdev)
{
	return 0;
}
#endif

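/*
 * Editorial note, not in the original source: assuming
 * DP_GET_HW_LINK_ID_FRM_PPDU_ID() simply extracts link_id_bits bits
 * starting at link_id_offset from the PPDU id, it behaves like
 *
 *	(ppdu_id >> link_id_offset) & ((1 << link_id_bits) - 1)
 *
 * For example, with link_id_offset = 9, link_id_bits = 2 and
 * ppdu_id = 0x0600 (bits 9-10 equal 0x3), the helper above returns
 * 3 + 1 = 4, which is then range-checked against DP_MAX_MLO_LINKS.
 */
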
/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *			       per wbm ring
 *
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @txrx_peer: peer handle
 * @ring_id: ring number
 * @link_id: Link id
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_txrx_peer *txrx_peer, uint8_t ring_id,
			uint8_t link_id)
{
	struct dp_pdev *pdev = txrx_peer->vdev->pdev;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err_rl("Release source:%d is not from TQM", ts->release_src);
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1,
					  link_id);
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
		dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);

	if (ts->status < CDP_MAX_TX_TQM_STATUS)
		tid_stats->tqm_status_cnt[ts->status]++;

	if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
					   ts->transmit_cnt > 1, link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
					   1, ts->transmit_cnt > 2, link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma,
					   link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
					   ts->msdu_part_of_amsdu, link_id);
		DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
					   !ts->msdu_part_of_amsdu, link_id);
		txrx_peer->stats[link_id].per_pkt_stats.tx.last_tx_ts =
							qdf_system_ticks();

		dp_tx_update_peer_extd_stats(ts, txrx_peer, link_id);

		return;
	}

	/*
	 * tx_failed is ideally supposed to be updated from HTT ppdu
	 * completion stats. But in IPQ807X/IPQ6018 chipsets, owing to
	 * a hw limitation, there are no completions for failed cases,
	 * so tx_failed is updated from the data path here. Note: once
	 * tx_failed is reliably reported from the ppdu path, this
	 * update must be removed.
	 */
	DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
				   ts->transmit_cnt > DP_RETRY_COUNT,
				   link_id);
	dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer, link_id);

	if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
					      length, link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.fw_rem_queue_disable, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.fw_rem_no_match, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.drop_threshold, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.drop_link_desc_na, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.invalid_drop, 1,
					  link_id);
	} else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
					  tx.dropped.mcast_vdev_drop, 1,
					  link_id);
	} else {
		DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1,
					  link_id);
	}
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
			  struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
			    struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: tx status
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc,
					   qdf_nbuf_t netbuf,
					   uint8_t status)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;
	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);

	qdf_assert(tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev)
		return;

	osif_dev = vdev->osif_vdev;
	tx_compl_cbk = vdev->tx_comp;

	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		flag |= BIT(QDF_TX_RX_STATUS_OK);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev, flag);
}

/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @txrx_peer: DP peer context
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 * @link_id: link id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_txrx_peer *txrx_peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id,
					       uint8_t link_id)
{
	uint64_t delta_ms;
	struct cdp_tx_sojourn_stats *sojourn_stats;
	struct dp_peer *primary_link_peer = NULL;
	struct dp_soc *link_peer_soc = NULL;

	if (qdf_unlikely(!pdev->enhanced_stats_en))
		return;

	if (qdf_unlikely(tid == HTT_INVALID_TID ||
			 tid >= CDP_DATA_TID_MAX))
		return;

	if (qdf_unlikely(!pdev->sojourn_buf))
		return;

	primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_TX_COMP);

	if (qdf_unlikely(!primary_link_peer))
		return;

	sojourn_stats = (struct cdp_tx_sojourn_stats *)
		qdf_nbuf_data(pdev->sojourn_buf);

	link_peer_soc = primary_link_peer->vdev->pdev->soc;
	sojourn_stats->cookie = (void *)
			dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
							  primary_link_peer);

	delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
		   txdesc_ts;
	qdf_ewma_tx_lag_add(&txrx_peer->stats[link_id].
			    per_pkt_stats.tx.avg_sojourn_msdu[tid],
			    delta_ms);
	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
	sojourn_stats->num_msdus[tid] = 1;
	sojourn_stats->avg_sojourn_msdu[tid].internal =
		txrx_peer->stats[link_id].
			per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
			     pdev->sojourn_buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	sojourn_stats->sum_sojourn_msdu[tid] = 0;
	sojourn_stats->num_msdus[tid] = 0;
	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;

	dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_txrx_peer *txrx_peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id,
					       uint8_t link_id)
{
}
#endif

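/*
 * Editorial note, not in the original source: qdf_ewma_tx_lag is a
 * kernel-style exponentially weighted moving average. Assuming the
 * usual ewma semantics, each qdf_ewma_tx_lag_add() above folds the new
 * sample in roughly as
 *
 *	avg = avg + (delta_ms - avg) / weight
 *
 * so one outlier sojourn time only moves avg_sojourn_msdu by a
 * 1/weight fraction. The .internal field copied into sojourn_stats is
 * the raw fixed-point accumulator of that average, not a plain
 * millisecond value.
 */
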
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc,
				       struct hal_tx_completion_status *ts)
{
	dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
			     desc, ts->peer_id,
			     WDI_NO_VAL, desc->pdev->pdev_id);
}
#endif

void
dp_tx_comp_process_desc(struct dp_soc *soc,
			struct dp_tx_desc_s *desc,
			struct hal_tx_completion_status *ts,
			struct dp_txrx_peer *txrx_peer)
{
	uint64_t time_latency = 0;
	uint16_t peer_id = DP_INVALID_PEER_ID;

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
				qdf_ktime_to_ms(desc->timestamp));
	}

	dp_send_completion_to_pkt_capture(soc, desc, ts);

	if (dp_tx_pkt_tracepoints_enabled())
		qdf_trace_dp_packet(desc->nbuf, QDF_TX,
				    desc->msdu_ext_desc ?
				    desc->msdu_ext_desc->tso_desc : NULL,
				    qdf_ktime_to_ms(desc->timestamp));

	if (!(desc->msdu_ext_desc)) {
		dp_tx_enh_unmap(soc, desc);
		if (txrx_peer)
			peer_id = txrx_peer->peer_id;

		if (QDF_STATUS_SUCCESS ==
		    dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
			return;
		}

		if (QDF_STATUS_SUCCESS ==
		    dp_get_completion_indication_for_stack(soc,
							   desc->pdev,
							   txrx_peer, ts,
							   desc->nbuf,
							   time_latency)) {
			dp_send_completion_to_stack(soc,
						    desc->pdev,
						    ts->peer_id,
						    ts->ppdu_id,
						    desc->nbuf);
			return;
		}
	}

	desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
	dp_tx_comp_free_buf(soc, desc, false);
}

#ifdef DISABLE_DP_STATS
/**
 * dp_tx_update_connectivity_stats() - update tx connectivity stats
 * @soc: core txrx main context
 * @vdev: virtual device instance
 * @tx_desc: tx desc
 * @status: tx status
 *
 * Return: none
 */
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
}
#else
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
	void *osif_dev;
	ol_txrx_stats_rx_fp stats_cbk;
	uint8_t pkt_type;

	qdf_assert(tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev ||
	    !vdev->stats_cb)
		return;

	osif_dev = vdev->osif_vdev;
	stats_cbk = vdev->stats_cb;

	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
			  &pkt_type);
}
#endif

#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
/* Mask for bit29 ~ bit31 */
#define DP_TX_TS_BIT29_31_MASK 0xE0000000
/* Timestamp value (unit us) if bit29 is set */
#define DP_TX_TS_BIT29_SET_VALUE BIT(29)

/**
 * dp_tx_adjust_enqueue_buffer_ts() - adjust the enqueue buffer_timestamp
 * @ack_ts: OTA ack timestamp, unit us.
 * @enqueue_ts: TCL enqueue TX data to TQM timestamp, unit us.
 * @base_delta_ts: base timestamp delta for ack_ts and enqueue_ts
 *
 * This function restores the bit29 ~ bit31 3-bit value of
 * buffer_timestamp in the wbm2sw ring entry. buffer_timestamp can only
 * hold up to 0x7FFFF * 1024 us (29 bits), so if the timestamp exceeds
 * 0x7FFFF * 1024 us, bit29 ~ bit31 are lost.
 *
 * Return: the adjusted buffer_timestamp value
 */
static inline
uint32_t dp_tx_adjust_enqueue_buffer_ts(uint32_t ack_ts,
					uint32_t enqueue_ts,
					uint32_t base_delta_ts)
{
	uint32_t ack_buffer_ts;
	uint32_t ack_buffer_ts_bit29_31;
	uint32_t adjusted_enqueue_ts;

	/* corresponding buffer_timestamp value when receiving the OTA Ack */
	ack_buffer_ts = ack_ts - base_delta_ts;

	ack_buffer_ts_bit29_31 = ack_buffer_ts & DP_TX_TS_BIT29_31_MASK;

	/* restore the bit29 ~ bit31 value */
	adjusted_enqueue_ts = ack_buffer_ts_bit29_31 | enqueue_ts;

	/*
	 * If the actual enqueue_ts value occupies 29 bits only, and
	 * enqueue_ts + the real UL delay overflows 29 bits, then the 30th
	 * bit (bit-29) must not be set, otherwise an extra 0x20000000 us
	 * is added to enqueue_ts.
	 */
	if (qdf_unlikely(adjusted_enqueue_ts > ack_buffer_ts))
		adjusted_enqueue_ts -= DP_TX_TS_BIT29_SET_VALUE;

	return adjusted_enqueue_ts;
}

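/*
 * Worked example, editorial and not in the original source: with
 * base_delta_ts = 0, ack_ts = 0x21000000 us and a ring-reported
 * enqueue_ts = 0x00800000 us (top three bits lost to the 29-bit field):
 *
 *	ack_buffer_ts          = 0x21000000
 *	ack_buffer_ts_bit29_31 = 0x20000000
 *	adjusted_enqueue_ts    = 0x20000000 | 0x00800000 = 0x20800000
 *
 * 0x20800000 <= 0x21000000, so no bit-29 correction is applied and the
 * restored enqueue time precedes the ack. If instead the enqueue had
 * happened just before a 29-bit wrap (say enqueue_ts = 0x1F000000 and
 * ack_buffer_ts = 0x20100000), the OR would give 0x3F000000, which is
 * greater than the ack time, so DP_TX_TS_BIT29_SET_VALUE is subtracted
 * to bring it back to 0x1F000000.
 */
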
QDF_STATUS
dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
			  uint32_t delta_tsf,
			  uint32_t *delay_us)
{
	uint32_t buffer_ts;
	uint32_t delay;

	if (!delay_us)
		return QDF_STATUS_E_INVAL;

	/* If tx_rate_stats_info_valid is 0, the tsf field is invalid */
	if (!ts->valid)
		return QDF_STATUS_E_INVAL;

	/* buffer_timestamp is in units of 1024 us and is [31:13] of
	 * WBM_RELEASE_RING_4. After left shift 10 bits, it's
	 * valid up to 29 bits.
	 */
	buffer_ts = ts->buffer_timestamp << 10;
	buffer_ts = dp_tx_adjust_enqueue_buffer_ts(ts->tsf,
						   buffer_ts, delta_tsf);

	delay = ts->tsf - buffer_ts - delta_tsf;

	if (qdf_unlikely(delay & 0x80000000)) {
		dp_err_rl("delay = 0x%x (-ve)\n"
			  "release_src = %d\n"
			  "ppdu_id = 0x%x\n"
			  "peer_id = 0x%x\n"
			  "tid = 0x%x\n"
			  "release_reason = %d\n"
			  "tsf = %u (0x%x)\n"
			  "buffer_timestamp = %u (0x%x)\n"
			  "delta_tsf = %u (0x%x)\n",
			  delay, ts->release_src, ts->ppdu_id, ts->peer_id,
			  ts->tid, ts->status, ts->tsf, ts->tsf,
			  ts->buffer_timestamp, ts->buffer_timestamp,
			  delta_tsf, delta_tsf);

		delay = 0;
		goto end;
	}

	delay &= 0x1FFFFFFF; /* mask 29 BITS */
	if (delay > 0x1000000) {
		dp_info_rl("----------------------\n"
			   "Tx completion status:\n"
			   "----------------------\n"
			   "release_src = %d\n"
			   "ppdu_id = 0x%x\n"
			   "release_reason = %d\n"
			   "tsf = %u (0x%x)\n"
			   "buffer_timestamp = %u (0x%x)\n"
			   "delta_tsf = %u (0x%x)\n",
			   ts->release_src, ts->ppdu_id, ts->status,
			   ts->tsf, ts->tsf, ts->buffer_timestamp,
			   ts->buffer_timestamp, delta_tsf, delta_tsf);
		return QDF_STATUS_E_FAILURE;
	}

end:
	*delay_us = delay;

	return QDF_STATUS_SUCCESS;
}

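/*
 * Worked example, editorial and not in the original source: with
 * ts->buffer_timestamp = 0x4E20 (units of 1024 us),
 * buffer_ts = 0x4E20 << 10 = 0x1388000 us. If ts->tsf = 0x1390000 and
 * delta_tsf = 0x1000, then
 *
 *	delay = 0x1390000 - 0x1388000 - 0x1000 = 0x7000 us (~28.7 ms)
 *
 * A negative result (bit-31 set) is clamped to 0, and anything above
 * 0x1000000 us (~16.8 s) is treated as bogus and rejected.
 */
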
void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint32_t delta_tsf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_err_rl("vdev %d does not exist", vdev_id);
		return;
	}

	vdev->delta_tsf = delta_tsf;
	dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
#endif

#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev) {
		dp_err_rl("vdev %d does not exist", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_atomic_set(&vdev->ul_delay_report, enable);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       uint32_t *val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;
	uint32_t delay_accum;
	uint32_t pkts_accum;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev) {
		dp_err_rl("vdev %d does not exist", vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_atomic_read(&vdev->ul_delay_report)) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	/* Average uplink delay based on current accumulated values */
	delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
	pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);

	/* Guard against a divide-by-zero when no packets have been
	 * accumulated since the last query
	 */
	if (!pkts_accum) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	*val = delay_accum / pkts_accum;
	dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
		 delay_accum, pkts_accum);

	/* Reset accumulated values to 0 */
	qdf_atomic_set(&vdev->ul_delay_accum, 0);
	qdf_atomic_set(&vdev->ul_pkts_accum, 0);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts)
{
	uint32_t ul_delay;

	if (qdf_unlikely(!vdev)) {
		dp_info_rl("vdev is null or delete in progress");
		return;
	}

	if (!qdf_atomic_read(&vdev->ul_delay_report))
		return;

	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
							  vdev->delta_tsf,
							  &ul_delay)))
		return;

	ul_delay /= 1000; /* in unit of ms */

	qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
	qdf_atomic_inc(&vdev->ul_pkts_accum);
}
#else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
static inline
void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
			       struct hal_tx_completion_status *ts)
{
}
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */

void dp_tx_comp_process_tx_status(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_txrx_peer *txrx_peer,
				  uint8_t ring_id)
{
	uint32_t length;
	qdf_ether_header_t *eh;
	struct dp_vdev *vdev = NULL;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	enum qdf_dp_tx_rx_status dp_status;
	uint8_t link_id = 0;

	if (!nbuf) {
		dp_info_rl("invalid tx descriptor. nbuf NULL");
		goto out;
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	length = dp_tx_get_pkt_len(tx_desc);

	dp_status = dp_tx_hw_to_qdf(ts->status);
	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(nbuf),
				 sizeof(qdf_nbuf_data(nbuf)),
				 tx_desc->id, ts->status, dp_status));

	dp_tx_comp_debug("--------------------\n"
			 "Tx Completion Stats:\n"
			 "--------------------\n"
			 "ack_frame_rssi = %d\n"
			 "first_msdu = %d\n"
			 "last_msdu = %d\n"
			 "msdu_part_of_amsdu = %d\n"
			 "rate_stats valid = %d\n"
			 "bw = %d\n"
			 "pkt_type = %d\n"
			 "stbc = %d\n"
			 "ldpc = %d\n"
			 "sgi = %d\n"
			 "mcs = %d\n"
			 "ofdma = %d\n"
			 "tones_in_ru = %d\n"
			 "tsf = %d\n"
			 "ppdu_id = %d\n"
			 "transmit_cnt = %d\n"
			 "tid = %d\n"
			 "peer_id = %d\n"
			 "tx_status = %d\n",
			 ts->ack_frame_rssi, ts->first_msdu,
			 ts->last_msdu, ts->msdu_part_of_amsdu,
			 ts->valid, ts->bw, ts->pkt_type, ts->stbc,
			 ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
			 ts->tones_in_ru, ts->tsf, ts->ppdu_id,
			 ts->transmit_cnt, ts->tid, ts->peer_id,
			 ts->status);

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	if (!txrx_peer) {
		dp_info_rl("peer is null or deletion in progress");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}
	vdev = txrx_peer->vdev;

	link_id = dp_tx_get_link_id_from_ppdu_id(soc, ts, txrx_peer, vdev);

	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
	dp_tx_update_uplink_delay(soc, vdev, ts);

	/* check tx complete notification */
	if (qdf_nbuf_tx_notify_comp_get(nbuf))
		dp_tx_notify_completion(soc, vdev, tx_desc,
					nbuf, ts->status);

	/* Update per-packet stats for mesh mode */
	if (qdf_unlikely(vdev->mesh_vdev) &&
	    !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

	/* Update peer level stats */
	if (qdf_unlikely(txrx_peer->bss_peer &&
			 vdev->opmode == wlan_op_mode_ap)) {
		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
						      length, link_id);

			if (txrx_peer->vdev->tx_encap_type ==
			    htt_cmn_pkt_type_ethernet &&
			    QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      tx.bcast, 1,
							      length, link_id);
			}
		}
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length,
					      link_id);
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
						      1, length, link_id);
			if (qdf_unlikely(txrx_peer->in_twt)) {
				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
							      tx.tx_success_twt,
							      1, length,
							      link_id);
			}
		}
	}

	dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id, link_id);
	dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
	dp_tx_update_peer_jitter_stats(txrx_peer, tx_desc, ts, ring_id);
	dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
				     ts, ts->tid);
	dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);

#ifdef QCA_SUPPORT_RDK_STATS
	if (soc->peerstats_enabled)
		dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
					    qdf_ktime_to_ms(tx_desc->timestamp),
					    ts->ppdu_id, link_id);
#endif

out:
	return;
}

#if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
	defined(QCA_ENHANCED_STATS_SUPPORT)
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update)
{
	if (update || (!txrx_peer->hw_txrx_stats_en)) {
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
	}
}
#elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update)
{
	if (!txrx_peer->hw_txrx_stats_en) {
		DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

		if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
			DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
	}
}
#else
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update)
{
	DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);

	if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
		DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
}
#endif

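/*
 * Editorial note, not in the original source: the three variants above
 * differ only in when the host updates the flat peer counters. When
 * hw_txrx_stats_en is set, the target is assumed to account these
 * packets itself, so the host skips the software increment unless the
 * caller explicitly forces it via @update; without HW offload support
 * the host always counts.
 */
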
/**
 * dp_tx_prefetch_next_nbuf_data(): Prefetch nbuf and nbuf data
 * @next: descriptor of the next buffer
 *
 * Return: none
 */
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
static inline
void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
{
	qdf_nbuf_t nbuf = NULL;

	if (next)
		nbuf = next->nbuf;
	if (nbuf)
		qdf_prefetch(nbuf);
}
#else
static inline
void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
{
}
#endif

/**
 * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
 * @soc: core txrx main context
 * @desc: software descriptor
 *
 * Return: true when packet is reinjected
 */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
static inline bool
dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = NULL;

	if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
		if (!soc->arch_ops.dp_tx_mcast_handler ||
		    !soc->arch_ops.dp_tx_is_mcast_primary)
			return false;

		vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
					     DP_MOD_ID_REINJECT);

		if (qdf_unlikely(!vdev)) {
			dp_tx_comp_info_rl("Unable to get vdev ref %d",
					   desc->id);
			return false;
		}

		if (!(soc->arch_ops.dp_tx_is_mcast_primary(soc, vdev))) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
			return false;
		}
		DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
				 qdf_nbuf_len(desc->nbuf));
		soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
		dp_tx_desc_release(desc, desc->pool_id);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
		return true;
	}

	return false;
}
#else
static inline bool
dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	return false;
}
#endif

#ifdef QCA_DP_TX_NBUF_LIST_FREE
static inline void
dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	qdf_nbuf_queue_head_init(nbuf_queue_head);
}

static inline void
dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
			  struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t nbuf = NULL;

	nbuf = desc->nbuf;
	if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_FAST))
		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
	else
		qdf_nbuf_free(nbuf);
}

static inline void
dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
				  qdf_nbuf_t nbuf)
{
	if (!nbuf)
		return;

	if (nbuf->is_from_recycler)
		qdf_nbuf_dev_queue_head(nbuf_queue_head, nbuf);
	else
		qdf_nbuf_free(nbuf);
}

static inline void
dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	qdf_nbuf_dev_kfree_list(nbuf_queue_head);
}
#else
static inline void
dp_tx_nbuf_queue_head_init(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
}

static inline void
dp_tx_nbuf_dev_queue_free(qdf_nbuf_queue_head_t *nbuf_queue_head,
			  struct dp_tx_desc_s *desc)
{
	qdf_nbuf_free(desc->nbuf);
}

static inline void
dp_tx_nbuf_dev_queue_free_no_flag(qdf_nbuf_queue_head_t *nbuf_queue_head,
				  qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}

static inline void
dp_tx_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
{
}
#endif

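/*
 * Illustrative usage, editorial and not in the original source: these
 * helpers let the completion path collect recycler ("page pool") nbufs
 * on a local queue head and release them in one batched call rather
 * than one qdf_nbuf_free() per packet, e.g.:
 *
 *	qdf_nbuf_queue_head_t h;
 *
 *	dp_tx_nbuf_queue_head_init(&h);
 *	// for each reaped descriptor (see the walk in
 *	// dp_tx_comp_process_desc_list() below):
 *	dp_tx_nbuf_dev_queue_free(&h, desc);
 *	// after the walk, one batched free:
 *	dp_tx_nbuf_dev_kfree_list(&h);
 *
 * Only descriptors with DP_TX_DESC_FLAG_FAST (or recycler nbufs in the
 * no-flag variant) are queued; everything else is freed immediately.
 */
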
#ifdef WLAN_SUPPORT_PPEDS
static inline void
dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 struct hal_tx_completion_status *ts,
				 struct dp_tx_desc_s *desc,
				 uint8_t ring_id)
{
	uint8_t link_id = 0;
	struct dp_vdev *vdev = NULL;

	if (qdf_likely(txrx_peer)) {
		dp_tx_update_peer_basic_stats(txrx_peer,
					      desc->length,
					      desc->tx_status,
					      false);
		if (!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			hal_tx_comp_get_status(&desc->comp,
					       ts,
					       soc->hal_soc);
			vdev = txrx_peer->vdev;
			link_id = dp_tx_get_link_id_from_ppdu_id(soc,
								 ts,
								 txrx_peer,
								 vdev);
			if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
				link_id = 0;
			dp_tx_update_peer_stats(desc, ts,
						txrx_peer,
						ring_id,
						link_id);
		}
	}
}
#else
static inline void
dp_tx_update_ppeds_tx_comp_stats(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 struct hal_tx_completion_status *ts,
				 struct dp_tx_desc_s *desc,
				 uint8_t ring_id)
{
}
#endif

void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts;
	struct dp_txrx_peer *txrx_peer = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	qdf_nbuf_queue_head_t h;

	desc = comp_head;

	dp_tx_nbuf_queue_head_init(&h);

	while (desc) {
		next = desc->next;
		dp_tx_prefetch_next_nbuf_data(next);

		if (peer_id != desc->peer_id) {
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_TX_COMP);
			peer_id = desc->peer_id;
			txrx_peer =
				dp_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_TX_COMP);
		}

		if (dp_tx_mcast_reinject_handler(soc, desc)) {
			desc = next;
			continue;
		}

		if (desc->flags & DP_TX_DESC_FLAG_PPEDS) {
			qdf_nbuf_t nbuf;

			dp_tx_update_ppeds_tx_comp_stats(soc, txrx_peer, &ts,
							 desc, ring_id);
			nbuf = dp_ppeds_tx_desc_free(soc, desc);
			dp_tx_nbuf_dev_queue_free_no_flag(&h, nbuf);
			desc = next;
			continue;
		}

		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			struct dp_pdev *pdev = desc->pdev;

			if (qdf_likely(txrx_peer))
				dp_tx_update_peer_basic_stats(txrx_peer,
							      desc->length,
							      desc->tx_status,
							      false);
			qdf_assert(pdev);
			dp_tx_outstanding_dec(pdev);

			/*
			 * Calling a QDF wrapper here would have a
			 * significant performance impact, so the wrapper
			 * call is avoided.
			 */
			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
					       desc->id, DP_TX_COMP_UNMAP);
			dp_tx_nbuf_unmap(soc, desc);
			dp_tx_nbuf_dev_queue_free(&h, desc);
			dp_tx_desc_free(soc, desc, desc->pool_id);
			desc = next;
			continue;
		}

		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

		dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
					     ring_id);

		dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);

		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
	dp_tx_nbuf_dev_kfree_list(&h);
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
}

#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	bool limit_hit = false;

	limit_hit =
		(num_reaped >= max_reap_limit) ? true : false;

	if (limit_hit)
		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}

static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	return cfg->tx_comp_loop_pkt_limit;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
{
	return 0;
}
#endif

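/*
 * Editorial note, not in the original source: with a configured
 * tx_comp_loop_pkt_limit of, say, 64, the reap loop in
 * dp_tx_comp_handler() below breaks after 64 descriptors even when the
 * quota and ring occupancy are larger; the near-full and end-of-loop
 * checks then decide whether to jump back to more_data. The stubs
 * returning false/0 above effectively disable this early break.
 */
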
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
static inline int
dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
				  int *max_reap_limit)
{
	return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
							       max_reap_limit);
}
#else
static inline int
dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
				  int *max_reap_limit)
{
	return 0;
}
#endif

#ifdef DP_TX_TRACKING
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
	if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
	    (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
		dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
		qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
	}
}
#endif

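/*
 * Editorial summary, not in the original source, of the reap flow in
 * dp_tx_comp_handler() below:
 *
 * 1) start SRNG access and size the reap against quota/near-full limits
 * 2) validate each entry's release source (must be TQM or FW; anything
 *    else is a WBM internal error and is only logged/counted)
 * 3) resolve the SW descriptor from the HAL descriptor
 * 4) FW release -> HTT completion path; TQM release -> sync HW status
 *    into the descriptor (skipped for DP_TX_DESC_FLAG_SIMPLE)
 * 5) chain the descriptors into a head/tail list
 * 6) process the list, then re-check the ring (near-full / end-of-loop
 *    checks) before returning the processed count
 */
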
#ifndef WLAN_SOFTUMAC_SUPPORT
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			    uint32_t quota)
{
	void *tx_comp_hal_desc;
	void *last_prefetched_hw_desc = NULL;
	struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
	hal_soc_handle_t hal_soc;
	uint8_t buffer_src;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed = 0;
	uint32_t count;
	uint32_t num_avail_for_reap = 0;
	bool force_break = false;
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	int max_reap_limit, ring_near_full;
	uint32_t num_entries;

	DP_HIST_INIT();

	num_entries = hal_srng_get_num_entries(soc->hal_soc, hal_ring_hdl);

more_data:
	hal_soc = soc->hal_soc;
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;
	count = 0;
	max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);

	ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
							   &max_reap_limit);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		return 0;
	}

	if (!num_avail_for_reap)
		num_avail_for_reap = hal_srng_dst_num_valid(hal_soc,
							    hal_ring_hdl, 0);

	if (num_avail_for_reap >= quota)
		num_avail_for_reap = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
							hal_ring_hdl,
							num_avail_for_reap);

	/* Find head descriptor from completion ring */
	while (qdf_likely(num_avail_for_reap--)) {
		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;
		buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
							   tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert.
		 */
		if (qdf_unlikely(buffer_src !=
				 HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (qdf_unlikely(buffer_src !=
				  HAL_TX_COMP_RELEASE_SOURCE_FW))) {
			uint8_t wbm_internal_error;

			dp_err_rl(
				"Tx comp release_src != TQM | FW but from %d",
				buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);

			/* When WBM sees NULL buffer_addr_info in any of
			 * ingress rings it sends an error indication,
			 * with wbm_internal_error=1, to a specific ring.
			 * The WBM2SW ring used to indicate these errors is
			 * fixed in HW, and that ring is being used as the Tx
			 * completion ring. These errors are not related to
			 * Tx completions, and should just be ignored.
			 */
			wbm_internal_error = hal_get_wbm_internal_error(
							hal_soc,
							tx_comp_hal_desc);

			if (wbm_internal_error) {
				dp_err_rl("Tx comp wbm_internal_error!!");
				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
								buffer_src)
					dp_handle_wbm_internal_error(
						soc,
						tx_comp_hal_desc,
						hal_tx_comp_get_buffer_type(
							tx_comp_hal_desc));

			} else {
				dp_err_rl("Tx comp wbm_internal_error false");
				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
			}
			continue;
		}

		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
							tx_comp_hal_desc,
							&tx_desc);
		if (qdf_unlikely(!tx_desc)) {
			dp_err("unable to retrieve tx_desc!");
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_tx_comp_desc, 1);
			QDF_BUG(0);
			continue;
		}
		tx_desc->buffer_src = buffer_src;

		if (tx_desc->flags & DP_TX_DESC_FLAG_PPEDS)
			goto add_to_pool2;

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
				 HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
						 htt_tx_status);
			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
			soc->arch_ops.dp_tx_process_htt_completion(
							soc,
							tx_desc,
							htt_tx_status,
							ring_id);
		} else {
			tx_desc->tx_status =
				hal_tx_comp_get_tx_status(tx_comp_hal_desc);
			tx_desc->buffer_src = buffer_src;
			/*
			 * If the fast completion mode is enabled, extended
			 * metadata from the descriptor is not copied
			 */
			if (qdf_likely(tx_desc->flags &
				       DP_TX_DESC_FLAG_SIMPLE))
				goto add_to_pool;

			/*
			 * If the descriptor is already freed in vdev_detach,
			 * continue to the next descriptor
			 */
			if (qdf_unlikely
				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
				dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
						   tx_desc->id);
				DP_STATS_INC(soc, tx.tx_comp_exception, 1);
				dp_tx_desc_check_corruption(tx_desc);
				continue;
			}

			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
				dp_tx_comp_info_rl("pdev in down state %d",
						   tx_desc->id);
				tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
				dp_tx_comp_free_buf(soc, tx_desc, false);
				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
				goto next_desc;
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
						 tx_desc->flags, tx_desc->id);
				qdf_assert_always(0);
			}

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
add_to_pool:
			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

add_to_pool2:
			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;
		}
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * Stop processing once the processed packet count exceeds
		 * the given quota
		 */
		count++;

		dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
					       num_avail_for_reap,
					       hal_ring_hdl,
					       &last_prefetched_hw_desc,
					       &last_prefetched_sw_desc);

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
			break;
	}

	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], count);

	/*
	 * If we are processing in near-full condition, there are 3 scenarios:
	 * 1) Ring entries have reached critical state
	 * 2) Ring entries are still near the high threshold
	 * 3) Ring entries are below the safe level
	 *
	 * One more loop will move the state to normal processing and yield
	 */
	if (ring_near_full)
		goto more_data;

	if (dp_tx_comp_enable_eol_data_check(soc)) {
		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;

			num_avail_for_reap =
				hal_srng_dst_num_valid_locked(soc->hal_soc,
							      hal_ring_hdl,
							      true);
			if (qdf_unlikely(num_entries &&
					 (num_avail_for_reap >=
					  num_entries >> 1))) {
				DP_STATS_INC(soc, tx.near_full, 1);
				goto more_data;
			}
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
#endif

#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif

QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;

	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				    DP_TCL_METADATA_TYPE_VDEV_BASED);

	DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
				       vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif

void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
		vdev->search_type = soc->sta_mode_search_policy;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}

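/*
 * Editorial summary, not in the original source, of the resulting
 * address-search configuration:
 *
 *	STA with TDLS link up : ADDRX | ADDRY, search_type DEFAULT
 *	STA, no DA override   : ADDRY,         search_type = STA policy
 *	all other cases       : ADDRX,         search_type DEFAULT
 *	                        (a non-TDLS STA keeps the STA policy
 *	                        search_type even when the DA override
 *	                        applies)
 */
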
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If vdev is given, then only check whether the desc
	 * vdev matches. If vdev is NULL, then check whether
	 * the desc pdev matches.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Take the flow pool lock in case the pool is freed
		 * because all tx_desc get recycled while handling TX
		 * completions. This is not necessary for a force flush, as:
		 * a. a double lock would occur if dp_tx_desc_release is
		 *    also trying to acquire it.
		 * b. the dp interrupt has been disabled before the force
		 *    TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free the TX desc if force free is
				 * required, otherwise only reset the vdev
				 * in this TX desc.
				 */
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc,
							    false);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc,
							    false);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

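/*
 * Editorial note, not in the original source: the flush loops above map
 * a flat descriptor index j onto the multi-page descriptor array as
 *
 *	page_id = j / num_desc_per_page;
 *	offset  = j % num_desc_per_page;
 *
 * e.g. with 1024 descriptors per page, j = 2500 resolves to page 2,
 * offset 452, which dp_tx_desc_find() turns into the actual descriptor
 * address.
 */
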
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   uint32_t num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  uint32_t num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
static void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
static void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

  5563. #ifndef WLAN_SOFTUMAC_SUPPORT
  5564. void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
  5565. {
  5566. uint8_t num_pool;
  5567. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5568. dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
  5569. dp_tx_ext_desc_pool_free(soc, num_pool);
  5570. dp_tx_delete_static_pools(soc, num_pool);
  5571. }
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}

#else /* WLAN_SOFTUMAC_SUPPORT */
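/* SOFTUMAC targets manage only the static Tx descriptor pools here;
 * extension and TSO pools are not created on this path.
 */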
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_delete_static_pools(soc, num_pool);
}

void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_deinit_static_pools(soc, num_pool);
}
#endif /* WLAN_SOFTUMAC_SUPPORT */

/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
						uint8_t num_pool,
						uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					       uint8_t num_pool,
					       uint32_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

#ifndef WLAN_SOFTUMAC_SUPPORT
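/* Allocate all SoC-level Tx SW descriptor pools: static Tx descriptor
 * pools, extension descriptor pools and, unless TSO descriptor attach
 * is deferred, the TSO pools.
 */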
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	/* TSO pool allocation may be deferred to dp_tso_soc_attach() */
	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
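
/* Initialize the pools allocated by dp_soc_tx_desc_sw_pools_alloc();
 * on the non-deferred path this also sets up Tx flow control.
 */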
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}

#else /* WLAN_SOFTUMAC_SUPPORT */
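/* SOFTUMAC targets allocate only the static Tx descriptor pools;
 * extension and TSO pools are not used on this path.
 */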
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		return QDF_STATUS_E_RESOURCES;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		return QDF_STATUS_E_RESOURCES;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		return QDF_STATUS_E_RESOURCES;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SOFTUMAC_SUPPORT */
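
/* Allocate and initialize the TSO descriptor pools; used when TSO
 * descriptor attach was deferred at pools-alloc time (see
 * wlan_cfg_is_tso_desc_attach_defer()).
 */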
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
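
/* De-initialize and free the TSO descriptor pools; counterpart of
 * dp_tso_soc_attach().
 */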
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
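/* Stamp @nbuf at position @index with the TSF time derived from @time,
 * provided per-packet timestamping is enabled and the vdev exposes a
 * get_tsf_time callback.
 */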
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
		uint64_t tsf_time;

		if (vdev->get_tsf_time) {
			vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
			qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
		}
	}
}
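
/* Capture the current log timestamp into @time when per-packet
 * timestamping is enabled; *time is left unmodified otherwise.
 */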
void dp_pkt_get_timestamp(uint64_t *time)
{
	if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
		*time = qdf_get_log_timestamp();
}
#endif /* CONFIG_DP_PKT_ADD_TIMESTAMP */