dp_mon.c

/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_htt.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_internal.h>
#include "htt_ppdu_stats.h"
#include "dp_cal_client_api.h"

#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif

#define DP_INTR_POLL_TIMER_MS 5
#define INVALID_FREE_BUFF 0xffffffff

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define MAX_STRING_LEN_PER_FIELD 6
#define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
#endif

#ifdef QCA_MCOPY_SUPPORT
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->mcopy_mode = M_COPY_DISABLED;
	mon_pdev->mvdev = NULL;
}

static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mcopy_mode) {
		cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
		if (cdp_ops && cdp_ops->config_full_mon_mode)
			cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
							  DP_FULL_MON_ENABLE);
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to reset M_copy mode filters"));
		}
		mon_pdev->monitor_configured = false;
	}
}

static QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mvdev)
		return QDF_STATUS_E_RESOURCES;

	mon_pdev->mcopy_mode = val;
	mon_pdev->tx_sniffer_enable = 0;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
		if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings)
			mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
	}

	/*
	 * Setup the M copy mode filter.
	 */
	cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
	if (cdp_ops && cdp_ops->config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mcopy_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set M_copy mode filters"));
		dp_mon_filter_reset_mcopy_mode(pdev);
		dp_pdev_disable_mcopy_code(pdev);
		return status;
	}

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return status;
}
#else
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_MCOPY_SUPPORT */

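/*
 * How M-copy mode is driven in this file: dp_config_debug_sniffer() below
 * calls dp_reset_mcopy_mode() for every request and then enables M-copy via
 * dp_config_mcopy_mode() for val 2 or 4, so mirror copy never coexists with
 * another sniffer mode. A hypothetical caller toggling it would look like:
 *
 *	status = dp_config_debug_sniffer(pdev, 2);	// enable M-copy
 *	...
 *	status = dp_config_debug_sniffer(pdev, 0);	// back to disabled
 */
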
#ifdef QCA_UNDECODED_METADATA_SUPPORT
static QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->undecoded_metadata_capture) {
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Undecoded capture filter reset failed"));
		}
	}
	mon_pdev->undecoded_metadata_capture = 0;

	return status;
}

static QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;

	if (!mon_pdev->mvdev) {
		qdf_err("monitor vdev is NULL");
		return QDF_STATUS_E_RESOURCES;
	}

	mon_pdev->undecoded_metadata_capture = val;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);

	/* Setup the undecoded metadata capture mode filter. */
	dp_mon_filter_setup_undecoded_metadata_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set Undecoded capture filters"));
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		return status;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */

QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id,
				 uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_DISABLE);
	mon_pdev->mvdev = NULL;

	/*
	 * Lite monitor mode, smart monitor mode and monitor
	 * mode use this API for filter reset and mode disable.
	 */
	if (mon_pdev->mcopy_mode) {
#if defined(QCA_MCOPY_SUPPORT)
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* QCA_MCOPY_SUPPORT */
	} else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
		dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
		/* For mon 2.0 we make use of lite mon to
		 * set filters for the smart monitor use case.
		 */
		dp_monitor_lite_mon_disable_rx(pdev);
	} else if (mon_pdev->undecoded_metadata_capture) {
#ifdef QCA_UNDECODED_METADATA_SUPPORT
		dp_reset_undecoded_metadata_capture(pdev);
#endif
	} else {
		dp_mon_filter_reset_mon_mode(pdev);
	}

	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
				   soc);
	}

	mon_pdev->monitor_configured = false;
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time.
	 */
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	vdev = mon_pdev->mvdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		  pdev, pdev_id, soc, vdev);

	/* Check if current pdev's monitor_vdev exists */
	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	mon_pdev->mon_filter_mode = filter_val->mode;
	mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	mon_pdev->fp_data_filter = filter_val->fp_data;
	mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	mon_pdev->mo_data_filter = filter_val->mo_data;

	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
				   soc);
		dp_mon_filter_reset_mon_mode(pdev);
	}

	return status;
}
#endif

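/*
 * Illustrative sketch (not part of the driver): a control-path caller fills
 * a struct cdp_monitor_filter with the fields this function copies into
 * mon_pdev. The filter values below are hypothetical placeholders.
 *
 *	struct cdp_monitor_filter filter_val = {0};
 *
 *	filter_val.mode = MON_FILTER_PASS;	// hypothetical mode value
 *	filter_val.fp_mgmt = FILTER_MGMT_ALL;	// hypothetical FP masks
 *	filter_val.fp_ctrl = FILTER_CTRL_ALL;
 *	filter_val.fp_data = FILTER_DATA_ALL;
 *	dp_pdev_set_advance_monitor_filter(soc_hdl, pdev_id, &filter_val);
 */
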
QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	dp_deliver_mgmt_frm(pdev, nbuf);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: 0 on success, not 0 on failure
 */
static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	mon_vdev->scan_spcl_vap_stats =
		qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));

	if (!mon_vdev->scan_spcl_vap_stats) {
		dp_mon_err("scan spcl vap stats attach fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: void
 */
static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
	if (mon_vdev->scan_spcl_vap_stats) {
		qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
		mon_vdev->scan_spcl_vap_stats = NULL;
	}
}

/**
 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
 * @vdev: Datapath VDEV handle
 *
 * Return: void
 */
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev;

	mon_pdev = vdev->pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
		return;

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));
}

/**
 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @stats: structure to hold spcl vap stats
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   struct cdp_scan_spcl_vap_stats *stats)
{
	struct dp_mon_vdev *mon_vdev = NULL;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev || !stats) {
		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#else
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
}

static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
}
#endif

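/*
 * Reference handling note: dp_vdev_get_ref_by_id() takes a reference on the
 * vdev, so every exit path in dp_get_scan_spcl_vap_stats() above pairs it
 * with dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP) before returning,
 * including the early-error paths.
 */
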
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @dp_soc: Datapath soc handle
 * @vdev_id: id of the Datapath VDEV handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
					   uint8_t vdev_id,
					   uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	mon_pdev->mvdev = vdev;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		  pdev, pdev->pdev_id, pdev->soc, vdev);

	/*
	 * Do not configure the monitor buf ring and filter for smart and
	 * lite monitor modes. For smart monitor, filters are added along
	 * with the first NAC; for lite monitor, the required configuration
	 * is done through dp_set_pdev_param.
	 */
	if (special_monitor) {
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}

	if (mon_pdev->scan_spcl_vap_configured)
		dp_reset_scan_spcl_vap_stats(vdev);

	/* Check if current pdev's monitor_vdev exists */
	if (mon_pdev->monitor_configured) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "monitor vap already created vdev=%pK\n", vdev);
		status = QDF_STATUS_E_RESOURCES;
		goto fail;
	}

	mon_pdev->monitor_configured = true;

	/* Disable lite mon if configured; the monitor vap takes
	 * priority over lite mon when it is created. Lite mon
	 * can be configured again later.
	 */
	dp_monitor_lite_mon_disable_rx(pdev);

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to reset monitor filters", soc);
		dp_mon_filter_reset_mon_mode(pdev);
		mon_pdev->monitor_configured = false;
		mon_pdev->mvdev = NULL;
	}

fail:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

#ifdef QCA_TX_CAPTURE_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->tx_sniffer_enable = 1;
	mon_pdev->monitor_configured = false;

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
#ifdef QCA_MCOPY_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}
#endif
#endif

#if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/*
	 * Note: The mirror copy mode cannot co-exist with any other
	 * monitor modes. Hence disabling the filter for this mode will
	 * reset the monitor destination ring filters.
	 */
	dp_reset_mcopy_mode(pdev);
	switch (val) {
	case 0:
		mon_pdev->tx_sniffer_enable = 0;
		mon_pdev->monitor_configured = false;

		/*
		 * We don't need to reset the Rx monitor status ring or call
		 * the API dp_ppdu_ring_reset() if all debug sniffer modes are
		 * disabled. The Rx monitor status ring will be disabled when
		 * the last mode using the monitor status ring gets disabled.
		 */
		if (!mon_pdev->pktlog_ppdu_stats &&
		    !mon_pdev->enhanced_stats_en &&
		    !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		} else if (!mon_pdev->enhanced_stats_en &&
			   mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		}
		break;
	case 1:
		status = dp_config_tx_capture_mode(pdev);
		break;
	case 2:
	case 4:
		status = dp_config_mcopy_mode(pdev, val);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid value, mode not supported");
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}
#endif

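/*
 * Summary of dp_config_debug_sniffer() values as handled by the switch
 * above: 0 disables tx sniffer/M-copy and re-programs PPDU stats based on
 * whichever of enhanced stats / BPR is still active, 1 enables tx capture
 * mode, and 2 or 4 enable M-copy mode (the value is stored as
 * mon_pdev->mcopy_mode). Any other value is rejected with
 * QDF_STATUS_E_INVAL.
 */
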
#ifdef QCA_UNDECODED_METADATA_SUPPORT
QDF_STATUS
dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) {
		qdf_err("No monitor or Special vap, undecoded capture not supported");
		return QDF_STATUS_E_RESOURCES;
	}

	if (val)
		status = dp_enable_undecoded_metadata_capture(pdev, val);
	else
		status = dp_reset_undecoded_metadata_capture(pdev);

	return status;
}
#endif

/**
 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
 * ring based on target
 * @soc: soc handle
 * @mac_for_pdev: WIN- pdev_id, MCL- mac id
 * @pdev: physical device handle
 * @ring_num: mac id
 * @htt_tlv_filter: tlv filter
 *
 * Return: zero on success, non-zero on failure
 */
static inline QDF_STATUS
dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
			    struct dp_pdev *pdev, uint8_t ring_num,
			    struct htt_rx_ring_tlv_filter htt_tlv_filter)
{
	QDF_STATUS status;

	if (soc->wlan_cfg_ctx->rxdma1_enable)
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     soc->rxdma_mon_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_MONITOR_BUF,
					     RX_MONITOR_BUFFER_SIZE,
					     &htt_tlv_filter);
	else
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     pdev->rx_mac_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					     &htt_tlv_filter);

	return status;
}

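/*
 * Ring selection above: when soc->wlan_cfg_ctx->rxdma1_enable is set, the
 * dedicated RXDMA_MONITOR_BUF ring is configured with
 * RX_MONITOR_BUFFER_SIZE; otherwise the per-MAC RXDMA_BUF ring is used with
 * RX_DATA_BUFFER_SIZE. Both paths push the same htt_rx_ring_tlv_filter to
 * the firmware through htt_h2t_rx_ring_cfg().
 */
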
/**
 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 *
 * Return: virtual interface id
 */
static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
			 !pdev->monitor_pdev->mvdev))
		return -EINVAL;

	return pdev->monitor_pdev->mvdev->vdev_id;
}

#if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
#ifndef WLAN_TX_PKT_CAPTURE_ENH
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	} else {
		if (!mon_pdev->bpr_enable)
			qdf_nbuf_free(nbuf);
	}
}
#endif
#endif

QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
	if (!mon_pdev->ppdu_tlv_buf) {
		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
	struct ppdu_info *ppdu_info, *ppdu_info_next;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->sched_comp_list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	if (mon_pdev->ppdu_tlv_buf)
		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
}

QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    struct cdp_pdev_mon_stats *stats)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_copy(stats, &mon_pdev->rx_mon_stats,
		     sizeof(struct cdp_pdev_mon_stats));

	return QDF_STATUS_SUCCESS;
}

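/*
 * Illustrative (hypothetical) caller of dp_pdev_get_rx_mon_stats(): the
 * control path hands in a cdp_pdev_mon_stats buffer that the function fills
 * with a copy of mon_pdev->rx_mon_stats.
 *
 *	struct cdp_pdev_mon_stats mon_stats = {0};
 *
 *	if (dp_pdev_get_rx_mon_stats(soc_hdl, pdev_id, &mon_stats) ==
 *	    QDF_STATUS_SUCCESS)
 *		dump_mon_stats(&mon_stats);	// hypothetical consumer
 */
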
#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured
 * monitor pdev stats
 * @mon_pdev: Monitor PDEV handle
 * @rx_mon_stats: Monitor pdev status/destination ring stats
 *
 * Return: None
 */
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
	char undecoded_error[DP_UNDECODED_ERR_LENGTH];
	uint8_t index = 0, i;

	DP_PRINT_STATS("Rx Undecoded Frame count:%d",
		       rx_mon_stats->rx_undecoded_count);
	index = 0;
	for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) {
		index += qdf_snprint(&undecoded_error[index],
				     DP_UNDECODED_ERR_LENGTH - index,
				     " %d", rx_mon_stats->rx_undecoded_error[i]);
	}
	DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error);
}
#else
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
}
#endif

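/*
 * Formatting note for dp_pdev_get_undecoded_capture_stats(): each per-error
 * counter is appended as " %d", and the output buffer is sized as
 * MAX_STRING_LEN_PER_FIELD (6) * CDP_PHYRX_ERR_MAX, i.e. a budget of six
 * characters per field. The remaining space (DP_UNDECODED_ERR_LENGTH - index)
 * is passed to qdf_snprint() on every iteration, so each write is limited to
 * the space that is still available in the buffer.
 */
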
void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	uint32_t *stat_ring_ppdu_ids;
	uint32_t *dest_ring_ppdu_ids;
	int i, idx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;

	rx_mon_stats = &mon_pdev->rx_mon_stats;

	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");
	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
		       rx_mon_stats->status_ppdu_compl);
	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
		       rx_mon_stats->status_ppdu_start);
	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
		       rx_mon_stats->status_ppdu_end);
	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_start_mis);
	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_end_mis);
	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
		       rx_mon_stats->status_ppdu_done);
	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
		       rx_mon_stats->dest_ppdu_done);
	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
		       rx_mon_stats->dest_mpdu_done);
	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
		       rx_mon_stats->tlv_tag_status_err);
	DP_PRINT_STATS("mon status DMA not done WAR count= %u",
		       rx_mon_stats->status_buf_done_war);
	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
		       rx_mon_stats->dest_mpdu_drop);
	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
		       rx_mon_stats->dup_mon_linkdesc_cnt);
	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
		       rx_mon_stats->dup_mon_buf_cnt);
	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
		       rx_mon_stats->mon_rx_bufs_reaped_dest);
	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
		       rx_mon_stats->mon_rx_bufs_replenished_dest);
	DP_PRINT_STATS("ppdu_id_mismatch = %u",
		       rx_mon_stats->ppdu_id_mismatch);
	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
		       rx_mon_stats->ppdu_id_match);
	DP_PRINT_STATS("ppdus dropped frm status ring = %d",
		       rx_mon_stats->status_ppdu_drop);
	DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
		       rx_mon_stats->dest_ppdu_drop);
	DP_PRINT_STATS("mpdu_ppdu_id_mismatch_drop = %u",
		       rx_mon_stats->mpdu_ppdu_id_mismatch_drop);
	DP_PRINT_STATS("mpdu_decap_type_invalid = %u",
		       rx_mon_stats->mpdu_decap_type_invalid);

	stat_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	dest_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);

	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids)
		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");

	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	idx = rx_mon_stats->ppdu_id_hist_idx;
	qdf_mem_copy(stat_ring_ppdu_ids,
		     rx_mon_stats->stat_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_mem_copy(dest_ring_ppdu_ids,
		     rx_mon_stats->dest_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	DP_PRINT_STATS("PPDU Id history:");
	DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
	for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
		idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
		DP_PRINT_STATS("%*u\t%*u", 16,
			       rx_mon_stats->stat_ring_ppdu_id_hist[idx], 16,
			       rx_mon_stats->dest_ring_ppdu_id_hist[idx]);
	}
	qdf_mem_free(stat_ring_ppdu_ids);
	qdf_mem_free(dest_ring_ppdu_ids);
	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
		       rx_mon_stats->mon_rx_dest_stuck);
	DP_PRINT_STATS("rx_hdr_not_received = %d",
		       rx_mon_stats->rx_hdr_not_received);
	DP_PRINT_STATS("parent_buf_alloc = %d",
		       rx_mon_stats->parent_buf_alloc);
	DP_PRINT_STATS("parent_buf_free = %d",
		       rx_mon_stats->parent_buf_free);
	DP_PRINT_STATS("mpdus_buf_to_stack = %d",
		       rx_mon_stats->mpdus_buf_to_stack);
	DP_PRINT_STATS("frag_alloc = %d",
		       mon_soc->stats.frag_alloc);
	DP_PRINT_STATS("frag_free = %d",
		       mon_soc->stats.frag_free);
	DP_PRINT_STATS("status_buf_count = %d",
		       rx_mon_stats->status_buf_count);
	DP_PRINT_STATS("pkt_buf_count = %d",
		       rx_mon_stats->pkt_buf_count);
	dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats);
}

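/*
 * Note on the PPDU ID history dump above: the history arrays are snapshotted
 * under mon_lock, and the print loop starts one slot past the current write
 * index so the oldest entry comes first. The wrap-around uses
 * idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1), which relies on MAX_PPDU_ID_HIST
 * being a power of two.
 */
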
#ifdef QCA_SUPPORT_BPR
QDF_STATUS
dp_set_bpr_enable(struct dp_pdev *pdev, int val)
{
	struct dp_mon_ops *mon_ops;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (mon_ops && mon_ops->mon_set_bpr_enable)
		return mon_ops->mon_set_bpr_enable(pdev, val);

	return QDF_STATUS_E_FAILURE;
}
#endif

#ifdef WDI_EVENT_ENABLE
#ifdef BE_PKTLOG_SUPPORT
static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_soc *soc)
{
	if (mon_pdev->mvdev) {
		/* Nothing needs to be done if monitor mode is
		 * enabled
		 */
		mon_pdev->pktlog_hybrid_mode = true;
		return false;
	}

	if (!mon_pdev->pktlog_hybrid_mode) {
		mon_pdev->pktlog_hybrid_mode = true;
		dp_mon_filter_setup_pktlog_hybrid(pdev);
		if (dp_mon_filter_update(pdev) !=
		    QDF_STATUS_SUCCESS) {
			dp_cdp_err("Set hybrid filters failed");
			dp_mon_filter_reset_pktlog_hybrid(pdev);
			mon_pdev->rx_pktlog_mode =
				DP_RX_PKTLOG_DISABLED;
			return false;
		}

		dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_PKTLOG);
	}

	return true;
}

static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
	mon_pdev->pktlog_hybrid_mode = false;
}
#else
static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
}

static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_soc *soc)
{
	dp_cdp_err("Hybrid mode is supported only on beryllium");
	return true;
}
#endif

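/*
 * Return-value contract for dp_set_hybrid_pktlog_enable(): it returns false
 * when the caller should stop processing the WDI_EVENT_HYBRID_TX request
 * (monitor mode already active, or the hybrid filter update failed) and true
 * when normal handling may continue; dp_set_pktlog_wifi3() below maps a
 * false return to an early "return 0".
 */
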
  866. int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
  867. bool enable)
  868. {
  869. struct dp_soc *soc = NULL;
  870. int max_mac_rings = wlan_cfg_get_num_mac_rings
  871. (pdev->wlan_cfg_ctx);
  872. uint8_t mac_id = 0;
  873. struct dp_mon_soc *mon_soc;
  874. struct dp_mon_ops *mon_ops;
  875. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  876. soc = pdev->soc;
  877. mon_soc = soc->monitor_soc;
  878. mon_ops = dp_mon_ops_get(soc);
  879. if (!mon_ops)
  880. return 0;
  881. dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
  882. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  883. FL("Max_mac_rings %d "),
  884. max_mac_rings);
  885. if (enable) {
  886. switch (event) {
  887. case WDI_EVENT_RX_DESC:
  888. if (mon_pdev->mvdev) {
  889. /* Nothing needs to be done if monitor mode is
  890. * enabled
  891. */
  892. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
  893. return 0;
  894. }
  895. if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
  896. break;
  897. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
  898. dp_mon_filter_setup_rx_pkt_log_full(pdev);
  899. if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
  900. dp_cdp_err("%pK: Pktlog full filters set failed",
  901. soc);
  902. dp_mon_filter_reset_rx_pkt_log_full(pdev);
  903. mon_pdev->rx_pktlog_mode =
  904. DP_RX_PKTLOG_DISABLED;
  905. return 0;
  906. }
  907. dp_monitor_reap_timer_start(soc,
  908. CDP_MON_REAP_SOURCE_PKTLOG);
  909. break;
  910. case WDI_EVENT_LITE_RX:
  911. if (mon_pdev->mvdev) {
  912. /* Nothing needs to be done if monitor mode is
  913. * enabled
  914. */
  915. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
  916. return 0;
  917. }
  918. if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
  919. break;
  920. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
  921. /*
  922. * Set the packet log lite mode filter.
  923. */
  924. dp_mon_filter_setup_rx_pkt_log_lite(pdev);
  925. if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
  926. dp_cdp_err("%pK: Pktlog lite filters set failed",
  927. soc);
  928. dp_mon_filter_reset_rx_pkt_log_lite(pdev);
  929. mon_pdev->rx_pktlog_mode =
  930. DP_RX_PKTLOG_DISABLED;
  931. return 0;
  932. }
  933. dp_monitor_reap_timer_start(soc,
  934. CDP_MON_REAP_SOURCE_PKTLOG);
  935. break;
  936. case WDI_EVENT_LITE_T2H:
  937. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  938. int mac_for_pdev = dp_get_mac_id_for_pdev(
  939. mac_id, pdev->pdev_id);
  940. mon_pdev->pktlog_ppdu_stats = true;
  941. dp_h2t_cfg_stats_msg_send(pdev,
  942. DP_PPDU_TXLITE_STATS_BITMASK_CFG,
  943. mac_for_pdev);
  944. }
  945. break;
  946. case WDI_EVENT_RX_CBF:
  947. if (mon_pdev->mvdev) {
  948. /* Nothing needs to be done if monitor mode is
  949. * enabled
  950. */
  951. dp_mon_info("Mon mode, CBF setting filters");
  952. mon_pdev->rx_pktlog_cbf = true;
  953. return 0;
  954. }
  955. if (mon_pdev->rx_pktlog_cbf)
  956. break;
  957. mon_pdev->rx_pktlog_cbf = true;
  958. mon_pdev->monitor_configured = true;
  959. if (mon_ops->mon_vdev_set_monitor_mode_buf_rings)
  960. mon_ops->mon_vdev_set_monitor_mode_buf_rings(
  961. pdev);
  962. /*
  963. * Set the packet log lite mode filter.
  964. */
  965. qdf_info("Non mon mode: Enable destination ring");
  966. dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
  967. if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
  968. dp_mon_err("Pktlog set CBF filters failed");
  969. dp_mon_filter_reset_rx_pktlog_cbf(pdev);
  970. mon_pdev->rx_pktlog_mode =
  971. DP_RX_PKTLOG_DISABLED;
  972. mon_pdev->monitor_configured = false;
  973. return 0;
  974. }
  975. dp_monitor_reap_timer_start(soc,
  976. CDP_MON_REAP_SOURCE_PKTLOG);
  977. break;
  978. case WDI_EVENT_HYBRID_TX:
  979. if (!dp_set_hybrid_pktlog_enable(pdev, mon_pdev, soc))
  980. return 0;
  981. break;
  982. default:
  983. /* Nothing needs to be done for other pktlog types */
  984. break;
  985. }
  986. } else {
  987. switch (event) {
  988. case WDI_EVENT_RX_DESC:
  989. case WDI_EVENT_LITE_RX:
  990. if (mon_pdev->mvdev) {
  991. /* Nothing needs to be done if monitor mode is
  992. * enabled
  993. */
  994. mon_pdev->rx_pktlog_mode =
  995. DP_RX_PKTLOG_DISABLED;
  996. return 0;
  997. }
  998. if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_DISABLED)
  999. break;
  1000. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
  1001. dp_mon_filter_reset_rx_pkt_log_full(pdev);
  1002. if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
  1003. dp_cdp_err("%pK: Pktlog filters reset failed",
  1004. soc);
  1005. return 0;
  1006. }
  1007. dp_mon_filter_reset_rx_pkt_log_lite(pdev);
  1008. if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
  1009. dp_cdp_err("%pK: Pktlog filters reset failed",
  1010. soc);
  1011. return 0;
  1012. }
  1013. dp_monitor_reap_timer_stop(soc,
  1014. CDP_MON_REAP_SOURCE_PKTLOG);
  1015. break;
  1016. case WDI_EVENT_LITE_T2H:
  1017. /*
1018. * Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW.
1019. * Once these macros are defined in the htt header file, the
1020. * proper macros will be used here.

  1021. */
  1022. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  1023. int mac_for_pdev =
  1024. dp_get_mac_id_for_pdev(mac_id,
  1025. pdev->pdev_id);
  1026. mon_pdev->pktlog_ppdu_stats = false;
  1027. if (!mon_pdev->enhanced_stats_en &&
  1028. !mon_pdev->tx_sniffer_enable &&
  1029. !mon_pdev->mcopy_mode) {
  1030. dp_h2t_cfg_stats_msg_send(pdev, 0,
  1031. mac_for_pdev);
  1032. } else if (mon_pdev->tx_sniffer_enable ||
  1033. mon_pdev->mcopy_mode) {
  1034. dp_h2t_cfg_stats_msg_send(pdev,
  1035. DP_PPDU_STATS_CFG_SNIFFER,
  1036. mac_for_pdev);
  1037. } else if (mon_pdev->enhanced_stats_en) {
  1038. dp_h2t_cfg_stats_msg_send(pdev,
  1039. DP_PPDU_STATS_CFG_ENH_STATS,
  1040. mac_for_pdev);
  1041. }
  1042. }
  1043. break;
  1044. case WDI_EVENT_RX_CBF:
  1045. mon_pdev->rx_pktlog_cbf = false;
  1046. break;
  1047. case WDI_EVENT_HYBRID_TX:
  1048. dp_set_hybrid_pktlog_disable(mon_pdev);
  1049. break;
  1050. default:
  1051. /* Nothing needs to be done for other pktlog types */
  1052. break;
  1053. }
  1054. }
  1055. return 0;
  1056. }
  1057. #endif
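/*
 * Illustrative usage sketch (assumed caller, not taken from this file): a WDI
 * pktlog subscriber would typically toggle one event type at a time through
 * dp_set_pktlog_wifi3(), e.g. to switch lite Rx pktlog on and later off:
 *
 *   dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);   // install lite filters
 *   ...
 *   dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);  // reset filters, stop reap timer
 *
 * The function returns 0 on every path and ignores event types it does not
 * recognize, so callers rely on mon_pdev state (e.g. rx_pktlog_mode) rather
 * than the return value.
 */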
  1058. /* MCL specific functions */
  1059. #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
  1060. void dp_pktlogmod_exit(struct dp_pdev *pdev)
  1061. {
  1062. struct dp_soc *soc = pdev->soc;
  1063. struct hif_opaque_softc *scn = soc->hif_handle;
  1064. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1065. if (!scn) {
  1066. dp_mon_err("Invalid hif(scn) handle");
  1067. return;
  1068. }
  1069. dp_monitor_reap_timer_stop(soc, CDP_MON_REAP_SOURCE_PKTLOG);
  1070. pktlogmod_exit(scn);
  1071. mon_pdev->pkt_log_init = false;
  1072. }
  1073. #endif /*DP_CON_MON*/
  1074. #if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT)
  1075. #ifdef IPA_OFFLOAD
  1076. void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
  1077. struct cdp_interface_peer_stats *peer_stats_intf)
  1078. {
  1079. struct dp_rx_tid *rx_tid = NULL;
  1080. uint8_t i = 0;
  1081. for (i = 0; i < DP_MAX_TIDS; i++) {
  1082. rx_tid = &peer->rx_tid[i];
  1083. peer_stats_intf->rx_byte_count +=
  1084. rx_tid->rx_msdu_cnt.bytes;
  1085. peer_stats_intf->rx_packet_count +=
  1086. rx_tid->rx_msdu_cnt.num;
  1087. }
  1088. peer_stats_intf->tx_packet_count =
  1089. peer->monitor_peer->stats.tx.tx_ucast_success.num;
  1090. peer_stats_intf->tx_byte_count =
  1091. peer->monitor_peer->stats.tx.tx_ucast_success.bytes;
  1092. }
  1093. #else
  1094. void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
  1095. struct cdp_interface_peer_stats *peer_stats_intf)
  1096. {
  1097. struct dp_txrx_peer *txrx_peer = NULL;
  1098. struct dp_peer *tgt_peer = NULL;
  1099. tgt_peer = dp_get_tgt_peer_from_peer(peer);
  1100. txrx_peer = tgt_peer->txrx_peer;
  1101. peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num;
  1102. peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes;
  1103. peer_stats_intf->tx_packet_count =
  1104. txrx_peer->stats.per_pkt_stats.tx.ucast.num;
  1105. peer_stats_intf->tx_byte_count =
  1106. txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
  1107. }
  1108. #endif
  1109. QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
  1110. {
  1111. struct cdp_interface_peer_stats peer_stats_intf = {0};
  1112. struct dp_mon_peer_stats *mon_peer_stats = NULL;
  1113. struct dp_peer *tgt_peer = NULL;
  1114. struct dp_txrx_peer *txrx_peer = NULL;
  1115. if (!peer || !peer->vdev || !peer->monitor_peer)
  1116. return QDF_STATUS_E_FAULT;
  1117. tgt_peer = dp_get_tgt_peer_from_peer(peer);
  1118. if (!tgt_peer)
  1119. return QDF_STATUS_E_FAULT;
  1120. txrx_peer = tgt_peer->txrx_peer;
  1121. if (!txrx_peer)
  1122. return QDF_STATUS_E_FAULT;
  1123. mon_peer_stats = &peer->monitor_peer->stats;
  1124. if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
  1125. peer_stats_intf.rssi_changed = true;
  1126. if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
  1127. (mon_peer_stats->tx.tx_rate &&
  1128. mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) {
  1129. qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
  1130. QDF_MAC_ADDR_SIZE);
  1131. peer_stats_intf.vdev_id = peer->vdev->vdev_id;
  1132. peer_stats_intf.last_peer_tx_rate =
  1133. mon_peer_stats->tx.last_tx_rate;
  1134. peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
  1135. peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
  1136. peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
  1137. dp_peer_get_tx_rx_stats(peer, &peer_stats_intf);
  1138. peer_stats_intf.per = tgt_peer->stats.tx.last_per;
  1139. peer_stats_intf.free_buff = INVALID_FREE_BUFF;
  1140. dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
  1141. (void *)&peer_stats_intf, 0,
  1142. WDI_NO_VAL, dp_pdev->pdev_id);
  1143. }
  1144. return QDF_STATUS_SUCCESS;
  1145. }
  1146. #endif
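/*
 * Illustrative note (derived from the code above): dp_peer_stats_notify()
 * only raises WDI_EVENT_PEER_STATS when something actually changed, roughly:
 *
 *   rssi_changed = (rx.last_snr != rx.snr);
 *   if ((rx.snr && rssi_changed) ||
 *       (tx.tx_rate && tx.tx_rate != tx.last_tx_rate))
 *       // fill cdp_interface_peer_stats and call dp_wdi_event_handler()
 *
 * so a periodic caller (for example a stats timer) can invoke it on every
 * tick without flooding listeners with unchanged samples.
 */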
  1147. #ifdef FEATURE_NAC_RSSI
  1148. /**
  1149. * dp_rx_nac_filter(): Function to perform filtering of non-associated
  1150. * clients
  1151. * @pdev: DP pdev handle
  1152. * @rx_pkt_hdr: Rx packet Header
  1153. *
1154. * Return: dp_vdev*
  1155. */
  1156. static
  1157. struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
  1158. uint8_t *rx_pkt_hdr)
  1159. {
  1160. struct ieee80211_frame *wh;
  1161. struct dp_neighbour_peer *peer = NULL;
  1162. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1163. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1164. if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
  1165. return NULL;
  1166. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1167. TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
  1168. neighbour_peer_list_elem) {
  1169. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  1170. wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
  1171. dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
  1172. pdev->soc,
  1173. peer->neighbour_peers_macaddr.raw[0],
  1174. peer->neighbour_peers_macaddr.raw[1],
  1175. peer->neighbour_peers_macaddr.raw[2],
  1176. peer->neighbour_peers_macaddr.raw[3],
  1177. peer->neighbour_peers_macaddr.raw[4],
  1178. peer->neighbour_peers_macaddr.raw[5]);
  1179. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1180. return mon_pdev->mvdev;
  1181. }
  1182. }
  1183. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1184. return NULL;
  1185. }
  1186. QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
  1187. uint8_t *rx_pkt_hdr)
  1188. {
  1189. struct dp_vdev *vdev = NULL;
  1190. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1191. if (mon_pdev->filter_neighbour_peers) {
1192. /* Next Hop scenario is not yet handled */
  1193. vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
  1194. if (vdev) {
  1195. dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
  1196. pdev->invalid_peer_head_msdu,
  1197. pdev->invalid_peer_tail_msdu);
  1198. pdev->invalid_peer_head_msdu = NULL;
  1199. pdev->invalid_peer_tail_msdu = NULL;
  1200. return QDF_STATUS_SUCCESS;
  1201. }
  1202. }
  1203. return QDF_STATUS_E_FAILURE;
  1204. }
  1205. #endif
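/*
 * Illustrative note: dp_rx_nac_filter() keys purely on the transmitter
 * address (addr2) of a To-DS frame, so a configured NAC entry matches when:
 *
 *   wh = (struct ieee80211_frame *)rx_pkt_hdr;
 *   if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_TODS &&
 *       !qdf_mem_cmp(peer->neighbour_peers_macaddr.raw, wh->i_addr2,
 *                    QDF_MAC_ADDR_SIZE))
 *       // frame came from a monitored neighbour client
 *
 * Matching frames are then handed to the monitor vdev by
 * dp_filter_neighbour_peers() via dp_rx_mon_deliver().
 */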
  1206. #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
  1207. /*
1208. * dp_update_filter_neighbour_peers() - set neighbour peers (nac clients)
1209. * address for smart mesh filtering
1210. * @soc_hdl: cdp soc handle
  1211. * @vdev_id: id of virtual device object
  1212. * @cmd: Add/Del command
  1213. * @macaddr: nac client mac address
  1214. *
  1215. * Return: success/failure
  1216. */
  1217. static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
  1218. uint8_t vdev_id,
  1219. uint32_t cmd, uint8_t *macaddr)
  1220. {
  1221. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  1222. struct dp_pdev *pdev;
  1223. struct dp_neighbour_peer *peer = NULL;
  1224. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  1225. DP_MOD_ID_CDP);
  1226. struct dp_mon_pdev *mon_pdev;
  1227. if (!vdev || !macaddr)
  1228. goto fail0;
  1229. pdev = vdev->pdev;
  1230. if (!pdev)
  1231. goto fail0;
  1232. mon_pdev = pdev->monitor_pdev;
  1233. /* Store address of NAC (neighbour peer) which will be checked
  1234. * against TA of received packets.
  1235. */
  1236. if (cmd == DP_NAC_PARAM_ADD) {
  1237. peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
  1238. sizeof(*peer));
  1239. if (!peer) {
  1240. dp_cdp_err("%pK: DP neighbour peer node memory allocation failed"
  1241. , soc);
  1242. goto fail0;
  1243. }
  1244. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  1245. macaddr, QDF_MAC_ADDR_SIZE);
  1246. peer->vdev = vdev;
  1247. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1248. /* add this neighbour peer into the list */
  1249. TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer,
  1250. neighbour_peer_list_elem);
  1251. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1252. /* first neighbour */
  1253. if (!mon_pdev->neighbour_peers_added) {
  1254. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1255. mon_pdev->neighbour_peers_added = true;
  1256. dp_mon_filter_setup_smart_monitor(pdev);
  1257. status = dp_mon_filter_update(pdev);
  1258. if (status != QDF_STATUS_SUCCESS) {
  1259. dp_cdp_err("%pK: smart mon filter setup failed",
  1260. soc);
  1261. dp_mon_filter_reset_smart_monitor(pdev);
  1262. mon_pdev->neighbour_peers_added = false;
  1263. }
  1264. }
  1265. } else if (cmd == DP_NAC_PARAM_DEL) {
  1266. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1267. TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
  1268. neighbour_peer_list_elem) {
  1269. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  1270. macaddr, QDF_MAC_ADDR_SIZE)) {
  1271. /* delete this peer from the list */
  1272. TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
  1273. peer, neighbour_peer_list_elem);
  1274. qdf_mem_free(peer);
  1275. break;
  1276. }
  1277. }
  1278. /* last neighbour deleted */
  1279. if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) {
  1280. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1281. dp_mon_filter_reset_smart_monitor(pdev);
  1282. status = dp_mon_filter_update(pdev);
  1283. if (status != QDF_STATUS_SUCCESS) {
  1284. dp_cdp_err("%pK: smart mon filter clear failed",
  1285. soc);
  1286. }
  1287. mon_pdev->neighbour_peers_added = false;
  1288. }
  1289. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1290. }
  1291. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1292. return 1;
  1293. fail0:
  1294. if (vdev)
  1295. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1296. return 0;
  1297. }
  1298. #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
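/*
 * Illustrative usage sketch (hypothetical control-path caller): a NAC client
 * is added and later removed with the same helper, e.g.
 *
 *   dp_update_filter_neighbour_peers(soc_hdl, vdev_id, DP_NAC_PARAM_ADD,
 *                                    client_mac);
 *   ...
 *   dp_update_filter_neighbour_peers(soc_hdl, vdev_id, DP_NAC_PARAM_DEL,
 *                                    client_mac);
 *
 * The first ADD installs the smart-monitor filter and the last DEL tears it
 * down; the function returns 1 on success and 0 on failure.
 */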
  1299. #ifdef ATH_SUPPORT_NAC_RSSI
  1300. /**
  1301. * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
  1302. * @soc_hdl: DP soc handle
  1303. * @vdev_id: id of DP vdev handle
  1304. * @mac_addr: neighbour mac
  1305. * @rssi: rssi value
  1306. *
  1307. * Return: 0 for success. nonzero for failure.
  1308. */
  1309. static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
  1310. uint8_t vdev_id,
  1311. char *mac_addr,
  1312. uint8_t *rssi)
  1313. {
  1314. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1315. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  1316. DP_MOD_ID_CDP);
  1317. struct dp_pdev *pdev;
  1318. struct dp_neighbour_peer *peer = NULL;
  1319. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  1320. struct dp_mon_pdev *mon_pdev;
  1321. if (!vdev)
  1322. return status;
  1323. pdev = vdev->pdev;
  1324. mon_pdev = pdev->monitor_pdev;
  1325. *rssi = 0;
  1326. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1327. TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
  1328. neighbour_peer_list_elem) {
  1329. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  1330. mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
  1331. *rssi = peer->rssi;
  1332. status = QDF_STATUS_SUCCESS;
  1333. break;
  1334. }
  1335. }
  1336. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1337. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1338. return status;
  1339. }
  1340. static QDF_STATUS
  1341. dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
  1342. uint8_t vdev_id,
  1343. enum cdp_nac_param_cmd cmd, char *bssid,
  1344. char *client_macaddr,
  1345. uint8_t chan_num)
  1346. {
  1347. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  1348. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  1349. DP_MOD_ID_CDP);
  1350. struct dp_pdev *pdev;
  1351. struct dp_mon_pdev *mon_pdev;
  1352. if (!vdev)
  1353. return QDF_STATUS_E_FAILURE;
  1354. pdev = (struct dp_pdev *)vdev->pdev;
  1355. mon_pdev = pdev->monitor_pdev;
  1356. mon_pdev->nac_rssi_filtering = 1;
  1357. /* Store address of NAC (neighbour peer) which will be checked
  1358. * against TA of received packets.
  1359. */
  1360. if (cmd == CDP_NAC_PARAM_ADD) {
  1361. dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
  1362. DP_NAC_PARAM_ADD,
  1363. (uint8_t *)client_macaddr);
  1364. } else if (cmd == CDP_NAC_PARAM_DEL) {
  1365. dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
  1366. DP_NAC_PARAM_DEL,
  1367. (uint8_t *)client_macaddr);
  1368. }
  1369. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  1370. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  1371. (soc->ctrl_psoc, pdev->pdev_id,
  1372. vdev->vdev_id, cmd, bssid, client_macaddr);
  1373. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1374. return QDF_STATUS_SUCCESS;
  1375. }
  1376. #endif
  1377. bool
  1378. dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl,
  1379. enum cdp_mon_reap_source source,
  1380. bool enable)
  1381. {
  1382. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1383. if (enable)
  1384. return dp_monitor_reap_timer_start(soc, source);
  1385. else
  1386. return dp_monitor_reap_timer_stop(soc, source);
  1387. }
  1388. #if defined(DP_CON_MON)
  1389. #ifndef REMOVE_PKT_LOG
  1390. /**
  1391. * dp_pkt_log_init() - API to initialize packet log
  1392. * @soc_hdl: Datapath soc handle
  1393. * @pdev_id: id of data path pdev handle
  1394. * @scn: HIF context
  1395. *
  1396. * Return: none
  1397. */
  1398. void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
  1399. {
  1400. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1401. struct dp_pdev *handle =
  1402. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1403. struct dp_mon_pdev *mon_pdev;
  1404. if (!handle) {
  1405. dp_mon_err("pdev handle is NULL");
  1406. return;
  1407. }
  1408. mon_pdev = handle->monitor_pdev;
  1409. if (mon_pdev->pkt_log_init) {
1410. dp_mon_err("%pK: Packet log already initialized", soc);
  1411. return;
  1412. }
  1413. pktlog_sethandle(&mon_pdev->pl_dev, scn);
  1414. pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
  1415. pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
  1416. if (pktlogmod_init(scn)) {
  1417. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1418. "%s: pktlogmod_init failed", __func__);
  1419. mon_pdev->pkt_log_init = false;
  1420. } else {
  1421. mon_pdev->pkt_log_init = true;
  1422. }
  1423. }
  1424. /**
  1425. * dp_pkt_log_con_service() - connect packet log service
  1426. * @soc_hdl: Datapath soc handle
  1427. * @pdev_id: id of data path pdev handle
  1428. * @scn: device context
  1429. *
  1430. * Return: none
  1431. */
  1432. static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
  1433. uint8_t pdev_id, void *scn)
  1434. {
  1435. dp_pkt_log_init(soc_hdl, pdev_id, scn);
  1436. pktlog_htc_attach();
  1437. }
  1438. /**
  1439. * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
  1440. * @soc_hdl: Datapath soc handle
  1441. * @pdev_id: id of data path pdev handle
  1442. *
  1443. * Return: none
  1444. */
  1445. static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1446. {
  1447. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1448. struct dp_pdev *pdev =
  1449. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1450. if (!pdev) {
  1451. dp_err("pdev handle is NULL");
  1452. return;
  1453. }
  1454. dp_pktlogmod_exit(pdev);
  1455. }
  1456. #else
  1457. static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
  1458. uint8_t pdev_id, void *scn)
  1459. {
  1460. }
  1461. static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1462. {
  1463. }
  1464. #endif
  1465. #endif
  1466. void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  1467. {
  1468. struct dp_neighbour_peer *peer = NULL;
  1469. struct dp_neighbour_peer *temp_peer = NULL;
  1470. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1471. TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
  1472. neighbour_peer_list_elem, temp_peer) {
  1473. /* delete this peer from the list */
  1474. TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
  1475. peer, neighbour_peer_list_elem);
  1476. qdf_mem_free(peer);
  1477. }
  1478. qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
  1479. }
  1480. #ifdef QCA_ENHANCED_STATS_SUPPORT
  1481. /*
  1482. * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
  1483. * @pdev: Datapath pdev handle
  1484. *
  1485. * Return: void
  1486. */
  1487. static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
  1488. {
  1489. struct dp_soc *soc = pdev->soc;
  1490. struct dp_mon_ops *mon_ops = NULL;
  1491. mon_ops = dp_mon_ops_get(soc);
  1492. if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
  1493. mon_ops->mon_tx_enable_enhanced_stats(pdev);
  1494. }
  1495. /*
1496. * dp_enable_enhanced_stats() - API to enable enhanced statistics
1497. * @soc: DP_SOC handle
  1498. * @pdev_id: id of DP_PDEV handle
  1499. *
  1500. * Return: QDF_STATUS
  1501. */
  1502. static QDF_STATUS
  1503. dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
  1504. {
  1505. struct dp_pdev *pdev = NULL;
  1506. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1507. struct dp_mon_pdev *mon_pdev;
  1508. pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1509. pdev_id);
  1510. if (!pdev)
  1511. return QDF_STATUS_E_FAILURE;
  1512. mon_pdev = pdev->monitor_pdev;
  1513. if (!mon_pdev)
  1514. return QDF_STATUS_E_FAILURE;
  1515. if (mon_pdev->enhanced_stats_en == 0)
  1516. dp_cal_client_timer_start(mon_pdev->cal_client_ctx);
  1517. mon_pdev->enhanced_stats_en = 1;
  1518. pdev->enhanced_stats_en = true;
  1519. dp_mon_filter_setup_enhanced_stats(pdev);
  1520. status = dp_mon_filter_update(pdev);
  1521. if (status != QDF_STATUS_SUCCESS) {
  1522. dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
  1523. dp_mon_filter_reset_enhanced_stats(pdev);
  1524. dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
  1525. mon_pdev->enhanced_stats_en = 0;
  1526. pdev->enhanced_stats_en = false;
  1527. return QDF_STATUS_E_FAILURE;
  1528. }
  1529. dp_mon_tx_enable_enhanced_stats(pdev);
  1530. return QDF_STATUS_SUCCESS;
  1531. }
  1532. /*
  1533. * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
  1534. * @pdev: Datapath pdev handle
  1535. *
  1536. * Return: void
  1537. */
  1538. static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
  1539. {
  1540. struct dp_soc *soc = pdev->soc;
  1541. struct dp_mon_ops *mon_ops = NULL;
  1542. mon_ops = dp_mon_ops_get(soc);
  1543. if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
  1544. mon_ops->mon_tx_disable_enhanced_stats(pdev);
  1545. }
  1546. /*
1547. * dp_disable_enhanced_stats() - API to disable enhanced statistics
1548. * @soc: the soc handle
1549. * @pdev_id: pdev_id of pdev
1550. *
1551. * Return: QDF_STATUS
  1552. */
  1553. static QDF_STATUS
  1554. dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
  1555. {
  1556. struct dp_pdev *pdev =
  1557. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1558. pdev_id);
  1559. struct dp_mon_pdev *mon_pdev;
  1560. if (!pdev || !pdev->monitor_pdev)
  1561. return QDF_STATUS_E_FAILURE;
  1562. mon_pdev = pdev->monitor_pdev;
  1563. if (mon_pdev->enhanced_stats_en == 1)
  1564. dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
  1565. mon_pdev->enhanced_stats_en = 0;
  1566. pdev->enhanced_stats_en = false;
  1567. dp_mon_tx_disable_enhanced_stats(pdev);
  1568. dp_mon_filter_reset_enhanced_stats(pdev);
  1569. if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
  1570. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1571. FL("Failed to reset enhanced mode filters"));
  1572. }
  1573. return QDF_STATUS_SUCCESS;
  1574. }
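/*
 * Illustrative usage sketch (assumed caller): enhanced stats are simply
 * bracketed by the two APIs above, e.g.
 *
 *   if (dp_enable_enhanced_stats(soc_hdl, pdev_id) == QDF_STATUS_SUCCESS) {
 *       // ... collect per-peer enhanced Tx stats ...
 *       dp_disable_enhanced_stats(soc_hdl, pdev_id);
 *   }
 *
 * Enable starts the cal client timer and installs the enhanced-stats monitor
 * filter; disable reverses both, tolerating a filter-reset failure with only
 * an error log.
 */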
  1575. #ifdef WDI_EVENT_ENABLE
  1576. QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
  1577. struct cdp_rx_stats_ppdu_user *ppdu_user)
  1578. {
  1579. struct cdp_interface_peer_qos_stats qos_stats_intf;
  1580. if (ppdu_user->peer_id == HTT_INVALID_PEER) {
  1581. dp_mon_warn("Invalid peer id");
  1582. return QDF_STATUS_E_FAILURE;
  1583. }
  1584. qdf_mem_zero(&qos_stats_intf, sizeof(qos_stats_intf));
  1585. qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
  1586. QDF_MAC_ADDR_SIZE);
  1587. qos_stats_intf.frame_control = ppdu_user->frame_control;
  1588. qos_stats_intf.frame_control_info_valid =
  1589. ppdu_user->frame_control_info_valid;
  1590. qos_stats_intf.qos_control = ppdu_user->qos_control;
  1591. qos_stats_intf.qos_control_info_valid =
  1592. ppdu_user->qos_control_info_valid;
  1593. qos_stats_intf.vdev_id = ppdu_user->vdev_id;
  1594. dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
  1595. (void *)&qos_stats_intf, 0,
  1596. WDI_NO_VAL, dp_pdev->pdev_id);
  1597. return QDF_STATUS_SUCCESS;
  1598. }
  1599. #else
  1600. static inline QDF_STATUS
  1601. dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
  1602. struct cdp_rx_stats_ppdu_user *ppdu_user)
  1603. {
  1604. return QDF_STATUS_SUCCESS;
  1605. }
  1606. #endif
  1607. #endif /* QCA_ENHANCED_STATS_SUPPORT */
  1608. /**
  1609. * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
  1610. * for pktlog
  1611. * @soc: cdp_soc handle
  1612. * @pdev_id: id of dp pdev handle
  1613. * @mac_addr: Peer mac address
  1614. * @enb_dsb: Enable or disable peer based filtering
  1615. *
  1616. * Return: QDF_STATUS
  1617. */
  1618. static int
  1619. dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
  1620. uint8_t *mac_addr, uint8_t enb_dsb)
  1621. {
  1622. struct dp_peer *peer;
  1623. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  1624. struct dp_pdev *pdev =
  1625. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1626. pdev_id);
  1627. struct dp_mon_pdev *mon_pdev;
  1628. if (!pdev)
  1629. return QDF_STATUS_E_FAILURE;
  1630. mon_pdev = pdev->monitor_pdev;
  1631. peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
  1632. 0, DP_VDEV_ALL, DP_MOD_ID_CDP);
  1633. if (!peer) {
  1634. dp_mon_err("Invalid Peer");
  1635. return QDF_STATUS_E_FAILURE;
  1636. }
  1637. if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) {
  1638. peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
  1639. mon_pdev->dp_peer_based_pktlog = enb_dsb;
  1640. status = QDF_STATUS_SUCCESS;
  1641. }
  1642. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  1643. return status;
  1644. }
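/*
 * Illustrative usage sketch (hypothetical caller): peer-based pktlog
 * filtering is toggled per MAC address, e.g.
 *
 *   dp_enable_peer_based_pktlog(soc_hdl, pdev_id, peer_mac, 1);  // enable
 *   dp_enable_peer_based_pktlog(soc_hdl, pdev_id, peer_mac, 0);  // disable
 *
 * Only non-MLD peers that have a monitor_peer are updated; the pdev-level
 * dp_peer_based_pktlog flag mirrors the last request.
 */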
  1645. /**
  1646. * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
  1647. * @soc: DP_SOC handle
  1648. * @pdev_id: id of DP_PDEV handle
  1649. * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
  1650. * @is_tx_pkt_cap_enable: enable/disable/delete/print
  1651. * Tx packet capture in monitor mode
  1652. * @peer_mac: MAC address for which the above need to be enabled/disabled
  1653. *
1654. * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer, error status otherwise
  1655. */
  1656. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
  1657. static QDF_STATUS
  1658. dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
  1659. uint8_t pdev_id,
  1660. bool is_rx_pkt_cap_enable,
  1661. uint8_t is_tx_pkt_cap_enable,
  1662. uint8_t *peer_mac)
  1663. {
  1664. struct dp_peer *peer;
  1665. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  1666. struct dp_pdev *pdev =
  1667. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1668. pdev_id);
  1669. if (!pdev)
  1670. return QDF_STATUS_E_FAILURE;
  1671. peer = dp_peer_find_hash_find((struct dp_soc *)soc,
  1672. peer_mac, 0, DP_VDEV_ALL,
  1673. DP_MOD_ID_CDP);
  1674. if (!peer)
  1675. return QDF_STATUS_E_FAILURE;
  1676. /* we need to set tx pkt capture for non associated peer */
  1677. if (!IS_MLO_DP_MLD_PEER(peer)) {
  1678. status = dp_monitor_tx_peer_filter(pdev, peer,
  1679. is_tx_pkt_cap_enable,
  1680. peer_mac);
  1681. status = dp_peer_set_rx_capture_enabled(pdev, peer,
  1682. is_rx_pkt_cap_enable,
  1683. peer_mac);
  1684. }
  1685. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  1686. return status;
  1687. }
  1688. #endif
  1689. #ifdef QCA_MCOPY_SUPPORT
  1690. QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
  1691. uint16_t peer_id,
  1692. uint32_t ppdu_id,
  1693. uint8_t first_msdu)
  1694. {
  1695. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1696. if (mon_pdev->mcopy_mode) {
  1697. if (mon_pdev->mcopy_mode == M_COPY) {
  1698. if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
  1699. (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
  1700. return QDF_STATUS_E_INVAL;
  1701. }
  1702. }
  1703. if (!first_msdu)
  1704. return QDF_STATUS_E_INVAL;
  1705. mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
  1706. mon_pdev->m_copy_id.tx_peer_id = peer_id;
  1707. }
  1708. return QDF_STATUS_SUCCESS;
  1709. }
  1710. #endif
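/*
 * Illustrative note: in M_COPY mode dp_mcopy_check_deliver() lets through at
 * most one MSDU (the first) per (peer_id, ppdu_id) pair, so a caller loop
 * effectively behaves like:
 *
 *   if (dp_mcopy_check_deliver(pdev, peer_id, ppdu_id, first_msdu) ==
 *       QDF_STATUS_SUCCESS)
 *       // deliver this first MSDU to the upper layer
 *   else
 *       // duplicate or non-first MSDU: drop for m_copy purposes
 */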
  1711. #ifdef WDI_EVENT_ENABLE
  1712. #ifndef REMOVE_PKT_LOG
  1713. static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1714. {
  1715. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1716. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1717. if (!pdev || !pdev->monitor_pdev)
  1718. return NULL;
  1719. return pdev->monitor_pdev->pl_dev;
  1720. }
  1721. #else
  1722. static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1723. {
  1724. return NULL;
  1725. }
  1726. #endif
  1727. #endif
  1728. QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
  1729. uint32_t mac_id,
  1730. uint32_t event,
  1731. qdf_nbuf_t mpdu,
  1732. uint32_t msdu_timestamp)
  1733. {
  1734. uint32_t data_size, hdr_size, ppdu_id, align4byte;
  1735. struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1736. uint32_t *msg_word;
  1737. if (!pdev)
  1738. return QDF_STATUS_E_INVAL;
  1739. ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;
  1740. hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
  1741. + qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);
  1742. data_size = qdf_nbuf_len(mpdu);
  1743. qdf_nbuf_push_head(mpdu, hdr_size);
  1744. msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
  1745. /*
  1746. * Populate the PPDU Stats Indication header
  1747. */
  1748. HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
  1749. HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
  1750. HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
  1751. align4byte = ((data_size +
  1752. qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
  1753. + 3) >> 2) << 2;
  1754. HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
  1755. msg_word++;
  1756. HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
  1757. msg_word++;
  1758. *msg_word = msdu_timestamp;
  1759. msg_word++;
  1760. /* Skip reserved field */
  1761. msg_word++;
  1762. /*
  1763. * Populate MGMT_CTRL Payload TLV first
  1764. */
  1765. HTT_STATS_TLV_TAG_SET(*msg_word,
  1766. HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);
  1767. align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
  1768. qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
  1769. + 3) >> 2) << 2;
  1770. HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
  1771. msg_word++;
  1772. HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
  1773. *msg_word, data_size);
  1774. msg_word++;
  1775. dp_wdi_event_handler(event, soc, (void *)mpdu,
  1776. HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
  1777. qdf_nbuf_pull_head(mpdu, hdr_size);
  1778. return QDF_STATUS_SUCCESS;
  1779. }
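/*
 * Illustrative layout (derived from the code above) of the header that
 * dp_rx_populate_cbf_hdr() pushes in front of the mpdu before raising the
 * WDI event, at 32-bit word granularity:
 *
 *   word 0: msg type (PPDU_STATS_IND) / mac_id / pdev_id / aligned payload size
 *   word 1: ppdu_id
 *   word 2: msdu_timestamp
 *   word 3: reserved
 *   word 4: HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV tag + aligned TLV length
 *   word 5: frame length
 *   ...   : original CBF frame payload
 *
 * The header is removed again with qdf_nbuf_pull_head() after the WDI handler
 * returns, so the nbuf is handed back to the caller unchanged.
 */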
  1780. #ifdef ATH_SUPPORT_EXT_STAT
  1781. #ifdef WLAN_TELEMETRY_STATS_SUPPORT
1782. /* dp_peer_update_telemetry_stats() - update peer telemetry stats
1783. * @peer: Datapath peer
  1784. */
  1785. static inline
  1786. void dp_peer_update_telemetry_stats(struct dp_peer *peer)
  1787. {
  1788. struct dp_pdev *pdev;
  1789. struct dp_vdev *vdev;
  1790. struct dp_mon_peer *mon_peer = NULL;
  1791. uint8_t idx;
  1792. vdev = peer->vdev;
  1793. if (!vdev)
  1794. return;
  1795. pdev = vdev->pdev;
  1796. if (!pdev)
  1797. return;
  1798. mon_peer = peer->monitor_peer;
  1799. if (qdf_likely(mon_peer)) {
  1800. DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_failed,
  1801. mon_peer->stats.tx.retries);
  1802. DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_total,
  1803. mon_peer->stats.tx.tx_mpdus_tried);
  1804. idx = mon_peer->stats.airtime_consumption.avg_consumption.idx;
  1805. mon_peer->stats.airtime_consumption.avg_consumption.avg_consumption_per_sec[idx] =
  1806. mon_peer->stats.airtime_consumption.consumption;
  1807. mon_peer->stats.airtime_consumption.consumption = 0;
  1808. mon_peer->stats.airtime_consumption.avg_consumption.idx++;
1809. if (idx == MAX_CONSUMPTION_TIME - 1)
  1810. mon_peer->stats.airtime_consumption.avg_consumption.idx = 0;
  1811. }
  1812. }
  1813. #else
  1814. static inline
  1815. void dp_peer_update_telemetry_stats(struct dp_peer *peer)
  1816. { }
  1817. #endif
1818. /* dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
1819. * @soc: Datapath SOC
1820. * @peer: Datapath peer
1821. * @arg: argument to iter function
  1822. */
  1823. #ifdef IPA_OFFLOAD
  1824. static void
  1825. dp_peer_cal_clients_stats_update(struct dp_soc *soc,
  1826. struct dp_peer *peer,
  1827. void *arg)
  1828. {
  1829. struct cdp_calibr_stats_intf peer_stats_intf = {0};
  1830. struct dp_peer *tgt_peer = NULL;
  1831. struct dp_txrx_peer *txrx_peer = NULL;
  1832. dp_peer_update_telemetry_stats(peer);
  1833. if (!dp_peer_is_primary_link_peer(peer))
  1834. return;
  1835. tgt_peer = dp_get_tgt_peer_from_peer(peer);
  1836. if (!tgt_peer || !(tgt_peer->txrx_peer))
  1837. return;
  1838. txrx_peer = tgt_peer->txrx_peer;
  1839. peer_stats_intf.to_stack = txrx_peer->to_stack;
  1840. peer_stats_intf.tx_success =
  1841. peer->monitor_peer->stats.tx.tx_ucast_success;
  1842. peer_stats_intf.tx_ucast =
  1843. peer->monitor_peer->stats.tx.tx_ucast_total;
  1844. dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
  1845. &tgt_peer->stats);
  1846. dp_peer_get_rxtid_stats_ipa(peer, dp_peer_update_tid_stats_from_reo);
  1847. }
  1848. #else
  1849. static void
  1850. dp_peer_cal_clients_stats_update(struct dp_soc *soc,
  1851. struct dp_peer *peer,
  1852. void *arg)
  1853. {
  1854. struct cdp_calibr_stats_intf peer_stats_intf = {0};
  1855. struct dp_peer *tgt_peer = NULL;
  1856. struct dp_txrx_peer *txrx_peer = NULL;
  1857. dp_peer_update_telemetry_stats(peer);
  1858. if (!dp_peer_is_primary_link_peer(peer))
  1859. return;
  1860. tgt_peer = dp_get_tgt_peer_from_peer(peer);
  1861. if (!tgt_peer || !(tgt_peer->txrx_peer))
  1862. return;
  1863. txrx_peer = tgt_peer->txrx_peer;
  1864. peer_stats_intf.to_stack = txrx_peer->to_stack;
  1865. peer_stats_intf.tx_success =
  1866. txrx_peer->stats.per_pkt_stats.tx.tx_success;
  1867. peer_stats_intf.tx_ucast =
  1868. txrx_peer->stats.per_pkt_stats.tx.ucast;
  1869. dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
  1870. &tgt_peer->stats);
  1871. }
  1872. #endif
1873. /* dp_iterate_update_peer_list() - update peer stats on cal client timer
  1874. * @pdev_hdl: pdev handle
  1875. */
  1876. static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
  1877. {
  1878. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  1879. dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
  1880. DP_MOD_ID_CDP);
  1881. }
  1882. #else
  1883. static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
  1884. {
  1885. }
  1886. #endif
  1887. #ifdef ATH_SUPPORT_NAC
  1888. int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
  1889. bool val)
  1890. {
  1891. /* Enable/Disable smart mesh filtering. This flag will be checked
  1892. * during rx processing to check if packets are from NAC clients.
  1893. */
  1894. pdev->monitor_pdev->filter_neighbour_peers = val;
  1895. return 0;
  1896. }
  1897. #endif /* ATH_SUPPORT_NAC */
  1898. #ifdef WLAN_ATF_ENABLE
  1899. void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
  1900. {
  1901. if (!pdev) {
  1902. dp_cdp_err("Invalid pdev");
  1903. return;
  1904. }
  1905. pdev->monitor_pdev->dp_atf_stats_enable = value;
  1906. }
  1907. #endif
  1908. #ifdef QCA_ENHANCED_STATS_SUPPORT
  1909. /*
  1910. * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
  1911. * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
  1912. * @pdev: DP PDEV handle
  1913. * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
1914. * @ppdu_id: PPDU id
1915. *
1916. * Return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller
  1917. */
  1918. QDF_STATUS
  1919. dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
  1920. qdf_nbuf_t tag_buf,
  1921. uint32_t ppdu_id)
  1922. {
  1923. uint32_t *nbuf_ptr;
  1924. uint8_t trim_size;
  1925. size_t head_size;
  1926. struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
  1927. uint32_t *msg_word;
  1928. uint32_t tsf_hdr;
  1929. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1930. if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
  1931. (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
  1932. return QDF_STATUS_SUCCESS;
  1933. /*
  1934. * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
  1935. */
  1936. msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
  1937. msg_word = msg_word + 2;
  1938. tsf_hdr = *msg_word;
  1939. trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
  1940. HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
  1941. qdf_nbuf_data(tag_buf));
  1942. if (!qdf_nbuf_pull_head(tag_buf, trim_size))
  1943. return QDF_STATUS_SUCCESS;
  1944. qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
  1945. mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);
  1946. if (mon_pdev->tx_capture_enabled) {
  1947. head_size = sizeof(struct cdp_tx_mgmt_comp_info);
  1948. if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
  1949. qdf_err("Fail to get headroom h_sz %zu h_avail %d\n",
  1950. head_size, qdf_nbuf_headroom(tag_buf));
  1951. qdf_assert_always(0);
  1952. return QDF_STATUS_E_NOMEM;
  1953. }
  1954. ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
  1955. qdf_nbuf_push_head(tag_buf, head_size);
  1956. qdf_assert_always(ptr_mgmt_comp_info);
  1957. ptr_mgmt_comp_info->ppdu_id = ppdu_id;
  1958. ptr_mgmt_comp_info->is_sgen_pkt = true;
  1959. ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
  1960. } else {
  1961. head_size = sizeof(ppdu_id);
  1962. nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
  1963. *nbuf_ptr = ppdu_id;
  1964. }
  1965. if (mon_pdev->bpr_enable) {
  1966. dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
  1967. tag_buf, HTT_INVALID_PEER,
  1968. WDI_NO_VAL, pdev->pdev_id);
  1969. }
  1970. dp_deliver_mgmt_frm(pdev, tag_buf);
  1971. return QDF_STATUS_E_ALREADY;
  1972. }
  1973. /*
  1974. * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
  1975. * bitmap for sniffer mode
  1976. * @bitmap: received bitmap
  1977. *
  1978. * Return: expected bitmap value, returns zero if doesn't match with
  1979. * either 64-bit Tx window or 256-bit window tlv bitmap
  1980. */
  1981. int
  1982. dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
  1983. {
  1984. if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
  1985. return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
  1986. else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
  1987. return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
  1988. return 0;
  1989. }
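/*
 * Illustrative usage sketch (hypothetical caller; the tlv_bitmap field name
 * is an assumption): the completion check for sniffer mode reduces to
 * comparing the received bitmap against the value this helper echoes back:
 *
 *   if (dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(ppdu_info->tlv_bitmap))
 *       // all TLVs for a 64- or 256-MPDU BA window have arrived
 *
 * A return of 0 means the bitmap matches neither window size and the PPDU
 * descriptor is not yet complete for sniffer processing.
 */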
  1990. /*
  1991. * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
  1992. * @peer: Datapath peer handle
  1993. * @ppdu: User PPDU Descriptor
  1994. * @cur_ppdu_id: PPDU_ID
  1995. *
  1996. * Return: None
  1997. *
1998. * On a Tx data frame we may get the delayed ba flag set in
1999. * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) is received
2000. * only after we send a Block Ack Request (BAR). A successful msdu is reported
2001. * only after the Block Ack. To populate peer stats we need a successful
2002. * msdu (data frame), so we hold the Tx data stats in delayed_ba for the update.
  2003. */
  2004. static void
  2005. dp_peer_copy_delay_stats(struct dp_peer *peer,
  2006. struct cdp_tx_completion_ppdu_user *ppdu,
  2007. uint32_t cur_ppdu_id)
  2008. {
  2009. struct dp_pdev *pdev;
  2010. struct dp_vdev *vdev;
  2011. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  2012. if (mon_peer->last_delayed_ba) {
  2013. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2014. "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
  2015. mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
  2016. vdev = peer->vdev;
  2017. if (vdev) {
  2018. pdev = vdev->pdev;
  2019. pdev->stats.cdp_delayed_ba_not_recev++;
  2020. }
  2021. }
  2022. mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
  2023. mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
  2024. mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
  2025. mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
  2026. mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
  2027. mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
  2028. mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
  2029. mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
  2030. mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
  2031. mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
  2032. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
  2033. ppdu->mpdu_tried_ucast;
  2034. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
  2035. ppdu->mpdu_tried_mcast;
  2036. mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
  2037. mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
  2038. mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
  2039. mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
  2040. mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
  2041. mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
  2042. mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
  2043. mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
  2044. mon_peer->last_delayed_ba = true;
  2045. ppdu->debug_copied = true;
  2046. }
  2047. /*
  2048. * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
  2049. * @peer: Datapath peer handle
  2050. * @ppdu: PPDU Descriptor
  2051. *
  2052. * Return: None
  2053. *
2054. * For a Tx BAR, the PPDU stats TLV includes Block Ack info. PPDU info
2055. * from the Tx BAR frame is not required to populate peer stats,
2056. * but we need the successful MPDU and MSDU counts to update the previously
2057. * transmitted Tx data frame. Overwrite the ppdu stats with the previously
2058. * stored ppdu stats.
  2059. */
  2060. static void
  2061. dp_peer_copy_stats_to_bar(struct dp_peer *peer,
  2062. struct cdp_tx_completion_ppdu_user *ppdu)
  2063. {
  2064. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  2065. ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
  2066. ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
  2067. ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
  2068. ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
  2069. ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
  2070. ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
  2071. ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
  2072. ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
  2073. ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
  2074. ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
  2075. ppdu->mpdu_tried_ucast =
  2076. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
  2077. ppdu->mpdu_tried_mcast =
  2078. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
  2079. ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
  2080. ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
  2081. ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
  2082. ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
  2083. ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
  2084. ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
  2085. ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
  2086. ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;
  2087. mon_peer->last_delayed_ba = false;
  2088. ppdu->debug_copied = true;
  2089. }
  2090. /*
  2091. * dp_tx_rate_stats_update() - Update rate per-peer statistics
  2092. * @peer: Datapath peer handle
  2093. * @ppdu: PPDU Descriptor
  2094. *
  2095. * Return: None
  2096. */
  2097. static void
  2098. dp_tx_rate_stats_update(struct dp_peer *peer,
  2099. struct cdp_tx_completion_ppdu_user *ppdu)
  2100. {
  2101. uint32_t ratekbps = 0;
  2102. uint64_t ppdu_tx_rate = 0;
  2103. uint32_t rix;
  2104. uint16_t ratecode = 0;
  2105. struct dp_mon_peer *mon_peer = NULL;
  2106. if (!peer || !ppdu)
  2107. return;
  2108. if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
  2109. return;
  2110. mon_peer = peer->monitor_peer;
  2111. if (!mon_peer)
  2112. return;
  2113. ratekbps = dp_getrateindex(ppdu->gi,
  2114. ppdu->mcs,
  2115. ppdu->nss,
  2116. ppdu->preamble,
  2117. ppdu->bw,
  2118. ppdu->punc_mode,
  2119. &rix,
  2120. &ratecode);
  2121. if (!ratekbps)
  2122. return;
  2123. /* Calculate goodput in non-training period
2124. * In the training period, don't do anything as the
2125. * pending pkt is sent as goodput.
  2126. */
  2127. if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
  2128. ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
  2129. (CDP_PERCENT_MACRO - ppdu->current_rate_per));
  2130. }
  2131. ppdu->rix = rix;
  2132. ppdu->tx_ratekbps = ratekbps;
  2133. ppdu->tx_ratecode = ratecode;
  2134. DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps);
  2135. mon_peer->stats.tx.avg_tx_rate =
  2136. dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps);
  2137. ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate);
  2138. DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
  2139. mon_peer->stats.tx.bw_info = ppdu->bw;
  2140. mon_peer->stats.tx.gi_info = ppdu->gi;
  2141. mon_peer->stats.tx.nss_info = ppdu->nss;
  2142. mon_peer->stats.tx.mcs_info = ppdu->mcs;
  2143. mon_peer->stats.tx.preamble_info = ppdu->preamble;
  2144. if (peer->vdev) {
  2145. /*
  2146. * In STA mode:
  2147. * We get ucast stats as BSS peer stats.
  2148. *
  2149. * In AP mode:
  2150. * We get mcast stats as BSS peer stats.
  2151. * We get ucast stats as assoc peer stats.
  2152. */
  2153. if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
  2154. peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
  2155. peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
  2156. } else {
  2157. peer->vdev->stats.tx.last_tx_rate = ratekbps;
  2158. peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
  2159. }
  2160. }
  2161. }
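/*
 * Illustrative worked example: the smart-antenna goodput computed above is
 * the current rate scaled by the delivery ratio. With ratekbps = 144000
 * (144 Mbps) and current_rate_per = 10 (10% PER):
 *
 *   sa_goodput = (144000 / CDP_NUM_KB_IN_MB) * (CDP_PERCENT_MACRO - 10)
 *              = 144 * 90 = 12960        // i.e. 129.6 Mbps in 0.01 Mbps units
 *
 * assuming CDP_NUM_KB_IN_MB = 1000 and CDP_PERCENT_MACRO = 100; those macro
 * values are not defined in this file.
 */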
  2162. #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
  2163. void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
  2164. uint16_t peer_id)
  2165. {
  2166. struct cdp_interface_peer_stats peer_stats_intf;
  2167. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  2168. struct dp_txrx_peer *txrx_peer = NULL;
  2169. if (!mon_peer)
  2170. return;
  2171. qdf_mem_zero(&peer_stats_intf,
  2172. sizeof(struct cdp_interface_peer_stats));
  2173. mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
  2174. peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;
  2175. txrx_peer = dp_get_txrx_peer(peer);
  2176. if (txrx_peer) {
  2177. peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
  2178. peer_stats_intf.tx_byte_count =
  2179. txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
  2180. }
  2181. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
  2182. &peer_stats_intf, peer_id,
  2183. UPDATE_PEER_STATS, pdev->pdev_id);
  2184. }
  2185. #endif
  2186. #ifdef WLAN_FEATURE_11BE
  2187. /*
  2188. * dp_get_ru_index_frm_ru_tones() - get ru index
  2189. * @ru_tones: ru tones
  2190. *
  2191. * Return: ru index
  2192. */
  2193. static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
  2194. {
  2195. enum cdp_ru_index ru_index;
  2196. switch (ru_tones) {
  2197. case RU_26:
  2198. ru_index = RU_26_INDEX;
  2199. break;
  2200. case RU_52:
  2201. ru_index = RU_52_INDEX;
  2202. break;
  2203. case RU_52_26:
  2204. ru_index = RU_52_26_INDEX;
  2205. break;
  2206. case RU_106:
  2207. ru_index = RU_106_INDEX;
  2208. break;
  2209. case RU_106_26:
  2210. ru_index = RU_106_26_INDEX;
  2211. break;
  2212. case RU_242:
  2213. ru_index = RU_242_INDEX;
  2214. break;
  2215. case RU_484:
  2216. ru_index = RU_484_INDEX;
  2217. break;
  2218. case RU_484_242:
  2219. ru_index = RU_484_242_INDEX;
  2220. break;
  2221. case RU_996:
  2222. ru_index = RU_996_INDEX;
  2223. break;
  2224. case RU_996_484:
  2225. ru_index = RU_996_484_INDEX;
  2226. break;
  2227. case RU_996_484_242:
  2228. ru_index = RU_996_484_242_INDEX;
  2229. break;
  2230. case RU_2X996:
  2231. ru_index = RU_2X996_INDEX;
  2232. break;
  2233. case RU_2X996_484:
  2234. ru_index = RU_2X996_484_INDEX;
  2235. break;
  2236. case RU_3X996:
  2237. ru_index = RU_3X996_INDEX;
  2238. break;
  2239. case RU_3X996_484:
2240. ru_index = RU_3X996_484_INDEX;
  2241. break;
  2242. case RU_4X996:
  2243. ru_index = RU_4X996_INDEX;
  2244. break;
  2245. default:
  2246. ru_index = RU_INDEX_MAX;
  2247. break;
  2248. }
  2249. return ru_index;
  2250. }
  2251. /*
  2252. * dp_mon_get_ru_width_from_ru_size() - get ru_width from ru_size enum
  2253. * @ru_size: HTT ru_size enum
  2254. *
  2255. * Return: ru_width of uint32_t type
  2256. */
  2257. static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
  2258. {
  2259. uint32_t width = 0;
  2260. switch (ru_size) {
  2261. case HTT_PPDU_STATS_RU_26:
  2262. width = RU_26;
  2263. break;
  2264. case HTT_PPDU_STATS_RU_52:
  2265. width = RU_52;
  2266. break;
  2267. case HTT_PPDU_STATS_RU_52_26:
  2268. width = RU_52_26;
  2269. break;
  2270. case HTT_PPDU_STATS_RU_106:
  2271. width = RU_106;
  2272. break;
  2273. case HTT_PPDU_STATS_RU_106_26:
  2274. width = RU_106_26;
  2275. break;
  2276. case HTT_PPDU_STATS_RU_242:
  2277. width = RU_242;
  2278. break;
  2279. case HTT_PPDU_STATS_RU_484:
  2280. width = RU_484;
  2281. break;
  2282. case HTT_PPDU_STATS_RU_484_242:
  2283. width = RU_484_242;
  2284. break;
  2285. case HTT_PPDU_STATS_RU_996:
  2286. width = RU_996;
  2287. break;
  2288. case HTT_PPDU_STATS_RU_996_484:
  2289. width = RU_996_484;
  2290. break;
  2291. case HTT_PPDU_STATS_RU_996_484_242:
  2292. width = RU_996_484_242;
  2293. break;
  2294. case HTT_PPDU_STATS_RU_996x2:
  2295. width = RU_2X996;
  2296. break;
  2297. case HTT_PPDU_STATS_RU_996x2_484:
  2298. width = RU_2X996_484;
  2299. break;
  2300. case HTT_PPDU_STATS_RU_996x3:
  2301. width = RU_3X996;
  2302. break;
  2303. case HTT_PPDU_STATS_RU_996x3_484:
  2304. width = RU_3X996_484;
  2305. break;
  2306. case HTT_PPDU_STATS_RU_996x4:
  2307. width = RU_4X996;
  2308. break;
  2309. default:
  2310. dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
  2311. }
  2312. return width;
  2313. }
  2314. #else
  2315. static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
  2316. {
  2317. enum cdp_ru_index ru_index;
  2318. switch (ru_tones) {
  2319. case RU_26:
  2320. ru_index = RU_26_INDEX;
  2321. break;
  2322. case RU_52:
  2323. ru_index = RU_52_INDEX;
  2324. break;
  2325. case RU_106:
  2326. ru_index = RU_106_INDEX;
  2327. break;
  2328. case RU_242:
  2329. ru_index = RU_242_INDEX;
  2330. break;
  2331. case RU_484:
  2332. ru_index = RU_484_INDEX;
  2333. break;
  2334. case RU_996:
  2335. ru_index = RU_996_INDEX;
  2336. break;
  2337. default:
  2338. ru_index = RU_INDEX_MAX;
  2339. break;
  2340. }
  2341. return ru_index;
  2342. }
  2343. static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
  2344. {
  2345. uint32_t width = 0;
  2346. switch (ru_size) {
  2347. case HTT_PPDU_STATS_RU_26:
  2348. width = RU_26;
  2349. break;
  2350. case HTT_PPDU_STATS_RU_52:
  2351. width = RU_52;
  2352. break;
  2353. case HTT_PPDU_STATS_RU_106:
  2354. width = RU_106;
  2355. break;
  2356. case HTT_PPDU_STATS_RU_242:
  2357. width = RU_242;
  2358. break;
  2359. case HTT_PPDU_STATS_RU_484:
  2360. width = RU_484;
  2361. break;
  2362. case HTT_PPDU_STATS_RU_996:
  2363. width = RU_996;
  2364. break;
  2365. default:
  2366. dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
  2367. }
  2368. return width;
  2369. }
  2370. #endif
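/*
 * Illustrative usage sketch: the two helpers above translate between the HTT
 * ru_size enum, the absolute RU tone count, and the cdp stats index, e.g.
 *
 *   uint32_t tones = dp_mon_get_ru_width_from_ru_size(HTT_PPDU_STATS_RU_106);
 *   enum cdp_ru_index idx = dp_get_ru_index_frm_ru_tones(tones);
 *   // tones == RU_106, idx == RU_106_INDEX
 *
 * Unrecognized sizes fall through to width 0 / RU_INDEX_MAX, which callers
 * treat as "do not update RU-binned stats".
 */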
  2371. /*
  2372. * dp_tx_stats_update() - Update per-peer statistics
  2373. * @pdev: Datapath pdev handle
  2374. * @peer: Datapath peer handle
  2375. * @ppdu: PPDU Descriptor
  2376. * @ack_rssi: RSSI of last ack received
  2377. *
  2378. * Return: None
  2379. */
  2380. static void
  2381. dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
  2382. struct cdp_tx_completion_ppdu_user *ppdu,
  2383. uint32_t ack_rssi)
  2384. {
  2385. uint8_t preamble, mcs;
  2386. uint16_t num_msdu;
  2387. uint16_t num_mpdu;
  2388. uint16_t mpdu_tried;
  2389. uint16_t mpdu_failed;
  2390. struct dp_mon_ops *mon_ops;
  2391. enum cdp_ru_index ru_index;
  2392. struct dp_mon_peer *mon_peer = NULL;
  2393. uint32_t ratekbps = 0;
  2394. uint64_t tx_byte_count;
  2395. preamble = ppdu->preamble;
  2396. mcs = ppdu->mcs;
  2397. num_msdu = ppdu->num_msdu;
  2398. num_mpdu = ppdu->mpdu_success;
  2399. mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
  2400. mpdu_failed = mpdu_tried - num_mpdu;
  2401. tx_byte_count = ppdu->success_bytes;
  2402. /* If the peer statistics are already processed as part of
  2403. * per-MSDU completion handler, do not process these again in per-PPDU
  2404. * indications
  2405. */
  2406. if (pdev->soc->process_tx_status)
  2407. return;
  2408. mon_peer = peer->monitor_peer;
  2409. if (!mon_peer)
  2410. return;
  2411. if (!ppdu->is_mcast) {
  2412. DP_STATS_INC(mon_peer, tx.tx_ucast_total.num, num_msdu);
  2413. DP_STATS_INC(mon_peer, tx.tx_ucast_total.bytes,
  2414. tx_byte_count);
  2415. }
  2416. if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
  2417. /*
2418. * All failed MPDUs will be retried, so increment the retry
2419. * count based on the number of failed MPDUs. Even for ack
2420. * failure, i.e. for long retries, mpdu_failed equals
2421. * mpdu_tried.
  2422. */
  2423. DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
  2424. return;
  2425. }
  2426. if (ppdu->is_ppdu_cookie_valid)
  2427. DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1);
  2428. if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
  2429. ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
  2430. if (qdf_unlikely(ppdu->mu_group_id &&
  2431. !(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
  2432. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2433. "mu_group_id out of bound!!\n");
  2434. else
  2435. DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id],
  2436. (ppdu->user_pos + 1));
  2437. }
  2438. if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
  2439. ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
  2440. DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones);
  2441. DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start);
  2442. ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones);
  2443. if (ru_index != RU_INDEX_MAX) {
  2444. DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu,
  2445. num_msdu);
  2446. DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu,
  2447. num_mpdu);
  2448. DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried,
  2449. mpdu_tried);
  2450. }
  2451. }
  2452. /*
2453. * All failed MPDUs will be retried, so increment the retry
2454. * count based on the number of failed MPDUs. Even for ack
2455. * failure, i.e. for long retries, mpdu_failed equals
2456. * mpdu_tried.
  2457. */
  2458. DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
  2459. DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
  2460. num_msdu);
  2461. DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
  2462. num_mpdu);
  2463. DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
  2464. mpdu_tried);
  2465. DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu);
  2466. DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu);
  2467. DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu);
  2468. if (ppdu->tid < CDP_DATA_TID_MAX)
  2469. DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
  2470. num_msdu);
  2471. DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc);
  2472. DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc);
  2473. if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
  2474. DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ack_rssi);
  2475. if (!ppdu->is_mcast) {
  2476. DP_STATS_INC(mon_peer, tx.tx_ucast_success.num, num_msdu);
  2477. DP_STATS_INC(mon_peer, tx.tx_ucast_success.bytes,
  2478. tx_byte_count);
  2479. }
  2480. DP_STATS_INCC(mon_peer,
  2481. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2482. ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
  2483. DP_STATS_INCC(mon_peer,
  2484. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2485. ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
  2486. DP_STATS_INCC(mon_peer,
  2487. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2488. ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
  2489. DP_STATS_INCC(mon_peer,
  2490. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2491. ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
  2492. DP_STATS_INCC(mon_peer,
  2493. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2494. ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
  2495. DP_STATS_INCC(mon_peer,
  2496. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2497. ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
  2498. DP_STATS_INCC(mon_peer,
  2499. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2500. ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
  2501. DP_STATS_INCC(mon_peer,
  2502. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2503. ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
  2504. DP_STATS_INCC(mon_peer,
  2505. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2506. ((mcs >= MAX_MCS_11AX) && (preamble == DOT11_AX)));
  2507. DP_STATS_INCC(mon_peer,
  2508. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2509. ((mcs < MAX_MCS_11AX) && (preamble == DOT11_AX)));
  2510. DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu);
  2511. DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu));
  2512. DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
  2513. DP_STATS_INC(mon_peer, tx.tx_ppdus, 1);
  2514. DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu);
  2515. DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried);
  2516. mon_ops = dp_mon_ops_get(pdev->soc);
  2517. if (mon_ops && mon_ops->mon_tx_stats_update)
  2518. mon_ops->mon_tx_stats_update(mon_peer, ppdu);
  2519. dp_tx_rate_stats_update(peer, ppdu);
  2520. dp_peer_stats_notify(pdev, peer);
  2521. ratekbps = mon_peer->stats.tx.tx_rate;
  2522. DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps);
  2523. dp_send_stats_event(pdev, peer, ppdu->peer_id);
  2524. }
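/*
 * Illustrative sketch (not part of the driver): how the paired
 * DP_STATS_INCC() calls above bucket MSDUs into a per-preamble MCS
 * histogram, collapsing out-of-range MCS values into the last bucket.
 * The EX_* limits and the histogram array below are hypothetical
 * stand-ins for the driver's MAX_MCS/MAX_MCS_11AX definitions.
 */
#if 0
#include <stdint.h>

#define EX_MAX_MCS      14	/* assumed histogram size */
#define EX_MAX_MCS_11AX 12	/* assumed per-preamble MCS limit */

static void ex_update_mcs_histogram(uint32_t *mcs_count, uint8_t mcs,
				    uint32_t num_msdu)
{
	/* In-range MCS values land in their own bucket; anything at or
	 * above the preamble's limit accumulates in the last bucket.
	 */
	if (mcs < EX_MAX_MCS_11AX)
		mcs_count[mcs] += num_msdu;
	else
		mcs_count[EX_MAX_MCS - 1] += num_msdu;
}
#endif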
2525. /*
2526. * dp_get_ppdu_info_user_index: Find the per-user descriptor for a peer
2527. * within a PPDU, allocating a new entry if a new peer id arrives in the PPDU
2528. * @pdev: DP pdev handle
2529. * @peer_id: peer unique identifier
2530. * @ppdu_info: per ppdu tlv structure
2531. *
2532. * Return: user index to be populated
2533. */
  2534. static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
  2535. uint16_t peer_id,
  2536. struct ppdu_info *ppdu_info)
  2537. {
  2538. uint8_t user_index = 0;
  2539. struct cdp_tx_completion_ppdu *ppdu_desc;
  2540. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2541. ppdu_desc =
  2542. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2543. while ((user_index + 1) <= ppdu_info->last_user) {
  2544. ppdu_user_desc = &ppdu_desc->user[user_index];
  2545. if (ppdu_user_desc->peer_id != peer_id) {
  2546. user_index++;
  2547. continue;
  2548. } else {
  2549. /* Max users possible is 8 so user array index should
  2550. * not exceed 7
  2551. */
  2552. qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
  2553. return user_index;
  2554. }
  2555. }
  2556. ppdu_info->last_user++;
  2557. /* Max users possible is 8 so last user should not exceed 8 */
  2558. qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
  2559. return ppdu_info->last_user - 1;
  2560. }
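/*
 * Illustrative sketch (not part of the driver): the search-or-append
 * pattern dp_get_ppdu_info_user_index() implements above. All names and
 * the slot layout here are hypothetical; callers must ensure the slot
 * array does not overflow, as the qdf_assert_always() checks do above.
 */
#if 0
#include <stdint.h>

struct ex_user_slot {
	uint16_t peer_id;
};

static uint8_t ex_get_user_index(struct ex_user_slot *users,
				 uint8_t *last_user, uint16_t peer_id)
{
	uint8_t i;

	/* Reuse the slot if this peer_id was already seen in the PPDU */
	for (i = 0; i < *last_user; i++) {
		if (users[i].peer_id == peer_id)
			return i;
	}

	/* Otherwise hand out the next free slot */
	users[*last_user].peer_id = peer_id;
	return (*last_user)++;
}
#endif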
2561. /*
2562. * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2563. * @pdev: DP pdev handle
2564. * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
2565. * @ppdu_info: per ppdu tlv structure
2566. *
2567. * Return: void
2568. */
  2569. static void
  2570. dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
  2571. uint32_t *tag_buf,
  2572. struct ppdu_info *ppdu_info)
  2573. {
  2574. uint16_t frame_type;
  2575. uint16_t frame_ctrl;
  2576. uint16_t freq;
  2577. struct dp_soc *soc = NULL;
  2578. struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
  2579. uint64_t ppdu_start_timestamp;
  2580. uint32_t *start_tag_buf;
  2581. start_tag_buf = tag_buf;
  2582. ppdu_desc =
  2583. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2584. ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
  2585. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
  2586. ppdu_info->sched_cmdid =
  2587. HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
  2588. ppdu_desc->num_users =
  2589. HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
  2590. qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
  2591. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
  2592. frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
  2593. ppdu_desc->htt_frame_type = frame_type;
  2594. frame_ctrl = ppdu_desc->frame_ctrl;
  2595. ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
  2596. switch (frame_type) {
  2597. case HTT_STATS_FTYPE_TIDQ_DATA_SU:
  2598. case HTT_STATS_FTYPE_TIDQ_DATA_MU:
  2599. case HTT_STATS_FTYPE_SGEN_QOS_NULL:
2600. /*
2601. * for management packets, the frame type comes as DATA_SU;
2602. * check frame_ctrl before setting frame_type
2603. */
  2604. if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
  2605. ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
  2606. else
  2607. ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
  2608. break;
  2609. case HTT_STATS_FTYPE_SGEN_MU_BAR:
  2610. case HTT_STATS_FTYPE_SGEN_BAR:
  2611. case HTT_STATS_FTYPE_SGEN_BE_MU_BAR:
  2612. ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
  2613. break;
  2614. default:
  2615. ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
  2616. break;
  2617. }
  2618. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
  2619. ppdu_desc->tx_duration = *tag_buf;
  2620. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
  2621. ppdu_desc->ppdu_start_timestamp = *tag_buf;
  2622. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
  2623. freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
  2624. if (freq != ppdu_desc->channel) {
  2625. soc = pdev->soc;
  2626. ppdu_desc->channel = freq;
  2627. pdev->operating_channel.freq = freq;
  2628. if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
  2629. pdev->operating_channel.num =
  2630. soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
  2631. pdev->pdev_id,
  2632. freq);
  2633. if (soc && soc->cdp_soc.ol_ops->freq_to_band)
  2634. pdev->operating_channel.band =
  2635. soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
  2636. pdev->pdev_id,
  2637. freq);
  2638. }
  2639. ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
  2640. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
  2641. ppdu_desc->phy_ppdu_tx_time_us =
  2642. HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
  2643. ppdu_desc->beam_change =
  2644. HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
  2645. ppdu_desc->doppler =
  2646. HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
  2647. ppdu_desc->spatial_reuse =
  2648. HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
  2649. dp_tx_capture_htt_frame_counter(pdev, frame_type);
  2650. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
  2651. ppdu_start_timestamp = *tag_buf;
  2652. ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
  2653. HTT_SHIFT_UPPER_TIMESTAMP) &
  2654. HTT_MASK_UPPER_TIMESTAMP);
  2655. ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
  2656. ppdu_desc->tx_duration;
  2657. /* Ack time stamp is same as end time stamp*/
  2658. ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
  2659. ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
  2660. ppdu_desc->tx_duration;
  2661. ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
  2662. ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
  2663. ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
  2664. /* Ack time stamp is same as end time stamp*/
  2665. ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
  2666. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
  2667. ppdu_desc->bss_color =
  2668. HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
  2669. }
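/*
 * Illustrative sketch (not part of the driver): combining the lower and
 * upper 32-bit halves of the PPDU start timestamp, as done above with
 * HTT_SHIFT_UPPER_TIMESTAMP/HTT_MASK_UPPER_TIMESTAMP. The shift and mask
 * values below are assumptions for illustration only; the real values
 * come from the HTT headers.
 */
#if 0
#include <stdint.h>

#define EX_SHIFT_UPPER_TIMESTAMP 32
#define EX_MASK_UPPER_TIMESTAMP  0xFFFFFFFF00000000ULL

static uint64_t ex_combine_timestamp(uint32_t tstmp_l32, uint32_t tstmp_u32)
{
	uint64_t ts = tstmp_l32;

	/* Place the upper half above the lower 32 bits */
	ts |= (((uint64_t)tstmp_u32 << EX_SHIFT_UPPER_TIMESTAMP) &
	       EX_MASK_UPPER_TIMESTAMP);

	return ts;
}
#endif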
2670. /*
2671. * dp_process_ppdu_stats_user_common_tlv: Process htt_ppdu_stats_user_common_tlv
2672. * @pdev: DP pdev handle
2673. * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
2674. * @ppdu_info: per ppdu tlv structure
2675. * Return: void
2676. */
  2677. static void dp_process_ppdu_stats_user_common_tlv(
  2678. struct dp_pdev *pdev, uint32_t *tag_buf,
  2679. struct ppdu_info *ppdu_info)
  2680. {
  2681. uint16_t peer_id;
  2682. struct cdp_tx_completion_ppdu *ppdu_desc;
  2683. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2684. uint8_t curr_user_index = 0;
  2685. struct dp_peer *peer;
  2686. struct dp_vdev *vdev;
  2687. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2688. ppdu_desc =
  2689. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2690. tag_buf++;
  2691. peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
  2692. curr_user_index =
  2693. dp_get_ppdu_info_user_index(pdev,
  2694. peer_id, ppdu_info);
  2695. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2696. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2697. ppdu_desc->vdev_id =
  2698. HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
  2699. ppdu_user_desc->peer_id = peer_id;
  2700. tag_buf++;
  2701. if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
  2702. ppdu_user_desc->delayed_ba = 1;
  2703. ppdu_desc->delayed_ba = 1;
  2704. }
  2705. if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
  2706. ppdu_user_desc->is_mcast = true;
  2707. ppdu_user_desc->mpdu_tried_mcast =
  2708. HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
  2709. ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
  2710. } else {
  2711. ppdu_user_desc->mpdu_tried_ucast =
  2712. HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
  2713. }
  2714. ppdu_user_desc->is_seq_num_valid =
  2715. HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
  2716. tag_buf++;
  2717. ppdu_user_desc->qos_ctrl =
  2718. HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
  2719. ppdu_user_desc->frame_ctrl =
  2720. HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
  2721. ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
  2722. if (ppdu_user_desc->delayed_ba)
  2723. ppdu_user_desc->mpdu_success = 0;
  2724. tag_buf += 3;
  2725. if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
  2726. ppdu_user_desc->ppdu_cookie =
  2727. HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
  2728. ppdu_user_desc->is_ppdu_cookie_valid = 1;
  2729. }
2730. /* returning earlier would leave other fields unpopulated */
  2731. if (peer_id == DP_SCAN_PEER_ID) {
  2732. vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
  2733. DP_MOD_ID_TX_PPDU_STATS);
  2734. if (!vdev)
  2735. return;
  2736. qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
  2737. QDF_MAC_ADDR_SIZE);
  2738. dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
  2739. } else {
  2740. peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
  2741. DP_MOD_ID_TX_PPDU_STATS);
  2742. if (!peer) {
2743. /*
2744. * fw may send a peer_id that has already been
2745. * removed on the host.
2746. * e.g. for disassoc, fw sends ppdu stats
2747. * with the peer_id of the previously associated
2748. * peer even though that peer was removed
2749. */
  2750. vdev = dp_vdev_get_ref_by_id(pdev->soc,
  2751. ppdu_desc->vdev_id,
  2752. DP_MOD_ID_TX_PPDU_STATS);
  2753. if (!vdev)
  2754. return;
  2755. qdf_mem_copy(ppdu_user_desc->mac_addr,
  2756. vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
  2757. dp_vdev_unref_delete(pdev->soc, vdev,
  2758. DP_MOD_ID_TX_PPDU_STATS);
  2759. return;
  2760. }
  2761. qdf_mem_copy(ppdu_user_desc->mac_addr,
  2762. peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
  2763. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  2764. }
  2765. }
  2766. /**
  2767. * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
  2768. * @pdev: DP pdev handle
  2769. * @tag_buf: T2H message buffer carrying the user rate TLV
  2770. * @ppdu_info: per ppdu tlv structure
  2771. *
  2772. * return:void
  2773. */
  2774. static void
  2775. dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
  2776. uint32_t *tag_buf,
  2777. struct ppdu_info *ppdu_info)
  2778. {
  2779. uint16_t peer_id;
  2780. struct cdp_tx_completion_ppdu *ppdu_desc;
  2781. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2782. uint8_t curr_user_index = 0;
  2783. struct dp_vdev *vdev;
  2784. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2785. uint8_t bw, ru_format;
  2786. uint16_t ru_size;
  2787. ppdu_desc =
  2788. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2789. tag_buf++;
  2790. peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
  2791. curr_user_index =
  2792. dp_get_ppdu_info_user_index(pdev,
  2793. peer_id, ppdu_info);
  2794. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2795. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2796. if (peer_id == DP_SCAN_PEER_ID) {
  2797. vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
  2798. DP_MOD_ID_TX_PPDU_STATS);
  2799. if (!vdev)
  2800. return;
  2801. dp_vdev_unref_delete(pdev->soc, vdev,
  2802. DP_MOD_ID_TX_PPDU_STATS);
  2803. }
  2804. ppdu_user_desc->peer_id = peer_id;
  2805. ppdu_user_desc->tid =
  2806. HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
  2807. tag_buf += 1;
  2808. ppdu_user_desc->user_pos =
  2809. HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
  2810. ppdu_user_desc->mu_group_id =
  2811. HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
  2812. ru_format = HTT_PPDU_STATS_USER_RATE_TLV_RU_FORMAT_GET(*tag_buf);
  2813. tag_buf += 1;
  2814. if (!ru_format) {
  2815. /* ru_format = 0: ru_end, ru_start */
  2816. ppdu_user_desc->ru_start =
  2817. HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
  2818. ppdu_user_desc->ru_tones =
  2819. (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
  2820. HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
  2821. } else if (ru_format == 1) {
  2822. /* ru_format = 1: ru_index, ru_size */
  2823. ru_size = HTT_PPDU_STATS_USER_RATE_TLV_RU_SIZE_GET(*tag_buf);
  2824. ppdu_user_desc->ru_tones =
  2825. dp_mon_get_ru_width_from_ru_size(ru_size);
  2826. } else {
  2827. dp_mon_debug("Unsupported ru_format: %d rcvd", ru_format);
  2828. }
  2829. ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
  2830. tag_buf += 2;
  2831. ppdu_user_desc->ppdu_type =
  2832. HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
  2833. tag_buf++;
  2834. ppdu_user_desc->tx_rate = *tag_buf;
  2835. ppdu_user_desc->ltf_size =
  2836. HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
  2837. ppdu_user_desc->stbc =
  2838. HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
  2839. ppdu_user_desc->he_re =
  2840. HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
  2841. ppdu_user_desc->txbf =
  2842. HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
  2843. bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
  2844. /* Align bw value as per host data structures */
  2845. if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ)
  2846. ppdu_user_desc->bw = bw - 3;
  2847. else
  2848. ppdu_user_desc->bw = bw - 2;
  2849. ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
  2850. ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
  2851. ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
  2852. ppdu_user_desc->preamble =
  2853. HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
  2854. ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
  2855. ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
  2856. ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
  2857. tag_buf += 2;
  2858. ppdu_user_desc->punc_pattern_bitmap =
  2859. HTT_PPDU_STATS_USER_RATE_TLV_PUNC_PATTERN_BITMAP_GET(*tag_buf);
  2860. }
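/*
 * Illustrative sketch (not part of the driver): the two RU encodings
 * handled above. For ru_format 0 the tone count is derived from the
 * inclusive start/end indices; for ru_format 1 it is looked up from the
 * RU size code. The lookup table below is a hypothetical subset of
 * 11ax RU widths, not the driver's dp_mon_get_ru_width_from_ru_size().
 */
#if 0
#include <stdint.h>

static uint16_t ex_ru_size_to_tones(uint16_t ru_size)
{
	/* Assumed mapping of RU-size codes to tone counts */
	static const uint16_t tones[] = { 26, 52, 106, 242, 484, 996 };

	if (ru_size < sizeof(tones) / sizeof(tones[0]))
		return tones[ru_size];

	return 0;
}

static uint16_t ex_ru_tones(uint8_t ru_format, uint16_t ru_start,
			    uint16_t ru_end, uint16_t ru_size)
{
	if (ru_format == 0)
		return (ru_end - ru_start) + 1;	/* inclusive index range */
	if (ru_format == 1)
		return ex_ru_size_to_tones(ru_size);

	return 0;				/* unsupported format */
}
#endif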
  2861. /*
  2862. * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
  2863. * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
  2864. * pdev: DP PDEV handle
  2865. * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
  2866. * @ppdu_info: per ppdu tlv structure
  2867. *
  2868. * return:void
  2869. */
  2870. static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
  2871. struct dp_pdev *pdev, uint32_t *tag_buf,
  2872. struct ppdu_info *ppdu_info)
  2873. {
  2874. htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
  2875. (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
  2876. struct cdp_tx_completion_ppdu *ppdu_desc;
  2877. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2878. uint8_t curr_user_index = 0;
  2879. uint16_t peer_id;
  2880. uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
  2881. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2882. ppdu_desc =
  2883. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2884. tag_buf++;
  2885. peer_id =
  2886. HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  2887. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  2888. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2889. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2890. ppdu_user_desc->peer_id = peer_id;
  2891. ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
  2892. qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
  2893. sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
  2894. dp_process_ppdu_stats_update_failed_bitmap(pdev,
  2895. (void *)ppdu_user_desc,
  2896. ppdu_info->ppdu_id,
  2897. size);
  2898. }
  2899. /*
  2900. * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
  2901. * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2902. * @pdev: DP PDEV handle
  2903. * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
  2904. * @ppdu_info: per ppdu tlv structure
  2905. *
  2906. * return:void
  2907. */
  2908. static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
  2909. struct dp_pdev *pdev, uint32_t *tag_buf,
  2910. struct ppdu_info *ppdu_info)
  2911. {
  2912. htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
  2913. (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
  2914. struct cdp_tx_completion_ppdu *ppdu_desc;
  2915. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2916. uint8_t curr_user_index = 0;
  2917. uint16_t peer_id;
  2918. uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
  2919. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2920. ppdu_desc =
  2921. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2922. tag_buf++;
  2923. peer_id =
  2924. HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  2925. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  2926. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2927. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2928. ppdu_user_desc->peer_id = peer_id;
  2929. ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
  2930. qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
  2931. sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
  2932. dp_process_ppdu_stats_update_failed_bitmap(pdev,
  2933. (void *)ppdu_user_desc,
  2934. ppdu_info->ppdu_id,
  2935. size);
  2936. }
  2937. /*
  2938. * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
  2939. * htt_ppdu_stats_user_cmpltn_common_tlv
2940. * @pdev: DP PDEV handle
  2941. * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
  2942. * @ppdu_info: per ppdu tlv structure
  2943. *
  2944. * return:void
  2945. */
  2946. static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
  2947. struct dp_pdev *pdev, uint32_t *tag_buf,
  2948. struct ppdu_info *ppdu_info)
  2949. {
  2950. uint16_t peer_id;
  2951. struct cdp_tx_completion_ppdu *ppdu_desc;
  2952. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2953. uint8_t curr_user_index = 0;
  2954. uint8_t bw_iter;
  2955. htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
  2956. (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
  2957. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2958. ppdu_desc =
  2959. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2960. tag_buf++;
  2961. peer_id =
  2962. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
  2963. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  2964. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2965. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2966. ppdu_user_desc->peer_id = peer_id;
  2967. ppdu_user_desc->completion_status =
  2968. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
  2969. *tag_buf);
  2970. ppdu_user_desc->tid =
  2971. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
  2972. tag_buf++;
  2973. if (qdf_likely(ppdu_user_desc->completion_status ==
  2974. HTT_PPDU_STATS_USER_STATUS_OK)) {
  2975. ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
  2976. ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
  2977. ppdu_user_desc->ack_rssi_valid = 1;
  2978. } else {
  2979. ppdu_user_desc->ack_rssi_valid = 0;
  2980. }
  2981. tag_buf++;
  2982. ppdu_user_desc->mpdu_success =
  2983. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
  2984. ppdu_user_desc->mpdu_failed =
  2985. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
  2986. ppdu_user_desc->mpdu_success;
  2987. tag_buf++;
  2988. ppdu_user_desc->long_retries =
  2989. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
  2990. ppdu_user_desc->short_retries =
  2991. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
  2992. ppdu_user_desc->retry_mpdus =
  2993. ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
  2994. ppdu_user_desc->is_ampdu =
  2995. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
  2996. ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
  2997. ppdu_desc->resp_type =
  2998. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
  2999. ppdu_desc->mprot_type =
  3000. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
  3001. ppdu_desc->rts_success =
  3002. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
  3003. ppdu_desc->rts_failure =
  3004. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
  3005. ppdu_user_desc->pream_punct =
  3006. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
  3007. ppdu_info->compltn_common_tlv++;
3008. /*
3009. * A MU BAR may send a request to n users but we may receive an ack from
3010. * only m users. To count how many users responded, a separate per-PPDU
3011. * counter, bar_num_users, is incremented for every
3012. * htt_ppdu_stats_user_cmpltn_common_tlv
3013. */
  3014. ppdu_desc->bar_num_users++;
  3015. tag_buf++;
  3016. for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
  3017. ppdu_user_desc->rssi_chain[bw_iter] =
  3018. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
  3019. tag_buf++;
  3020. }
  3021. ppdu_user_desc->sa_tx_antenna =
  3022. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
  3023. tag_buf++;
  3024. ppdu_user_desc->sa_is_training =
  3025. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
  3026. if (ppdu_user_desc->sa_is_training) {
  3027. ppdu_user_desc->sa_goodput =
  3028. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
  3029. }
  3030. tag_buf++;
  3031. for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
  3032. ppdu_user_desc->sa_max_rates[bw_iter] =
  3033. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
  3034. }
  3035. tag_buf += CDP_NUM_SA_BW;
  3036. ppdu_user_desc->current_rate_per =
  3037. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
  3038. }
  3039. /*
  3040. * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
  3041. * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
  3042. * pdev: DP PDEV handle
  3043. * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
  3044. * @ppdu_info: per ppdu tlv structure
  3045. *
  3046. * return:void
  3047. */
  3048. static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
  3049. struct dp_pdev *pdev, uint32_t *tag_buf,
  3050. struct ppdu_info *ppdu_info)
  3051. {
  3052. htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
  3053. (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
  3054. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  3055. struct cdp_tx_completion_ppdu *ppdu_desc;
  3056. uint8_t curr_user_index = 0;
  3057. uint16_t peer_id;
  3058. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  3059. ppdu_desc =
  3060. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  3061. tag_buf++;
  3062. peer_id =
  3063. HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  3064. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  3065. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  3066. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  3067. ppdu_user_desc->peer_id = peer_id;
  3068. ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
  3069. qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
  3070. sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
  3071. ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
  3072. }
  3073. /*
  3074. * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
  3075. * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
  3076. * pdev: DP PDEV handle
  3077. * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
  3078. * @ppdu_info: per ppdu tlv structure
  3079. *
  3080. * return:void
  3081. */
  3082. static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
  3083. struct dp_pdev *pdev, uint32_t *tag_buf,
  3084. struct ppdu_info *ppdu_info)
  3085. {
  3086. htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
  3087. (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
  3088. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  3089. struct cdp_tx_completion_ppdu *ppdu_desc;
  3090. uint8_t curr_user_index = 0;
  3091. uint16_t peer_id;
  3092. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  3093. ppdu_desc =
  3094. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  3095. tag_buf++;
  3096. peer_id =
  3097. HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  3098. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  3099. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  3100. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  3101. ppdu_user_desc->peer_id = peer_id;
  3102. ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
  3103. qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
  3104. sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
  3105. ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
  3106. }
  3107. /*
  3108. * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
  3109. * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
3110. * @pdev: DP PDEV handle
  3111. * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
  3112. * @ppdu_info: per ppdu tlv structure
  3113. *
  3114. * return:void
  3115. */
  3116. static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
  3117. struct dp_pdev *pdev, uint32_t *tag_buf,
  3118. struct ppdu_info *ppdu_info)
  3119. {
  3120. uint16_t peer_id;
  3121. struct cdp_tx_completion_ppdu *ppdu_desc;
  3122. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  3123. uint8_t curr_user_index = 0;
  3124. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  3125. ppdu_desc =
  3126. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  3127. tag_buf += 2;
  3128. peer_id =
  3129. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
  3130. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  3131. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  3132. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  3133. if (!ppdu_user_desc->ack_ba_tlv) {
  3134. ppdu_user_desc->ack_ba_tlv = 1;
  3135. } else {
  3136. pdev->stats.ack_ba_comes_twice++;
  3137. return;
  3138. }
  3139. ppdu_user_desc->peer_id = peer_id;
  3140. tag_buf++;
3141. /* do not update ppdu_desc->tid from this TLV */
  3142. ppdu_user_desc->num_mpdu =
  3143. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
  3144. ppdu_user_desc->num_msdu =
  3145. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
  3146. ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
  3147. tag_buf++;
  3148. ppdu_user_desc->start_seq =
  3149. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
  3150. *tag_buf);
  3151. tag_buf++;
  3152. ppdu_user_desc->success_bytes = *tag_buf;
  3153. /* increase ack ba tlv counter on successful mpdu */
  3154. if (ppdu_user_desc->num_mpdu)
  3155. ppdu_info->ack_ba_tlv++;
  3156. if (ppdu_user_desc->ba_size == 0) {
  3157. ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
  3158. ppdu_user_desc->ba_bitmap[0] = 1;
  3159. ppdu_user_desc->ba_size = 1;
  3160. }
  3161. }
  3162. /*
  3163. * dp_process_ppdu_stats_user_common_array_tlv: Process
  3164. * htt_ppdu_stats_user_common_array_tlv
3165. * @pdev: DP PDEV handle
3166. * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
  3167. * @ppdu_info: per ppdu tlv structure
  3168. *
  3169. * return:void
  3170. */
  3171. static void dp_process_ppdu_stats_user_common_array_tlv(
  3172. struct dp_pdev *pdev, uint32_t *tag_buf,
  3173. struct ppdu_info *ppdu_info)
  3174. {
  3175. uint32_t peer_id;
  3176. struct cdp_tx_completion_ppdu *ppdu_desc;
  3177. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  3178. uint8_t curr_user_index = 0;
  3179. struct htt_tx_ppdu_stats_info *dp_stats_buf;
  3180. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  3181. ppdu_desc =
  3182. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  3183. tag_buf++;
  3184. dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
  3185. tag_buf += 3;
  3186. peer_id =
  3187. HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
  3188. if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
  3189. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  3190. "Invalid peer");
  3191. return;
  3192. }
  3193. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  3194. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  3195. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  3196. ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
  3197. ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
  3198. tag_buf++;
  3199. ppdu_user_desc->success_msdus =
  3200. HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
  3201. ppdu_user_desc->retry_msdus =
  3202. HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
  3203. tag_buf++;
  3204. ppdu_user_desc->failed_msdus =
  3205. HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
  3206. }
  3207. /*
  3208. * dp_process_ppdu_stats_flush_tlv: Process
  3209. * htt_ppdu_stats_flush_tlv
  3210. * @pdev: DP PDEV handle
  3211. * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
  3212. * @ppdu_info: per ppdu tlv structure
  3213. *
  3214. * return:void
  3215. */
  3216. static void
  3217. dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
  3218. uint32_t *tag_buf,
  3219. struct ppdu_info *ppdu_info)
  3220. {
  3221. struct cdp_tx_completion_ppdu *ppdu_desc;
  3222. uint32_t peer_id;
  3223. uint8_t tid;
  3224. struct dp_peer *peer;
  3225. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  3226. struct dp_mon_peer *mon_peer = NULL;
  3227. ppdu_desc = (struct cdp_tx_completion_ppdu *)
  3228. qdf_nbuf_data(ppdu_info->nbuf);
  3229. ppdu_desc->is_flush = 1;
  3230. tag_buf++;
  3231. ppdu_desc->drop_reason = *tag_buf;
  3232. tag_buf++;
  3233. ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
  3234. ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
  3235. ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
  3236. tag_buf++;
  3237. peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
  3238. tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
  3239. ppdu_desc->num_users = 1;
  3240. ppdu_desc->user[0].peer_id = peer_id;
  3241. ppdu_desc->user[0].tid = tid;
  3242. ppdu_desc->queue_type =
  3243. HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
  3244. peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
  3245. DP_MOD_ID_TX_PPDU_STATS);
  3246. if (!peer)
  3247. goto add_ppdu_to_sched_list;
  3248. if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
  3249. mon_peer = peer->monitor_peer;
  3250. DP_STATS_INC(mon_peer,
  3251. tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
  3252. ppdu_desc->num_msdu);
  3253. }
  3254. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  3255. add_ppdu_to_sched_list:
  3256. ppdu_info->done = 1;
  3257. TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
  3258. mon_pdev->list_depth--;
  3259. TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
  3260. ppdu_info_list_elem);
  3261. mon_pdev->sched_comp_list_depth++;
  3262. }
  3263. /**
  3264. * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
  3265. * Here we are not going to process the buffer.
  3266. * @pdev: DP PDEV handle
  3267. * @ppdu_info: per ppdu tlv structure
  3268. *
  3269. * return:void
  3270. */
  3271. static void
  3272. dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
  3273. struct ppdu_info *ppdu_info)
  3274. {
  3275. struct cdp_tx_completion_ppdu *ppdu_desc;
  3276. struct dp_peer *peer;
  3277. uint8_t num_users;
  3278. uint8_t i;
  3279. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  3280. ppdu_desc = (struct cdp_tx_completion_ppdu *)
  3281. qdf_nbuf_data(ppdu_info->nbuf);
  3282. num_users = ppdu_desc->bar_num_users;
  3283. for (i = 0; i < num_users; i++) {
  3284. if (ppdu_desc->user[i].user_pos == 0) {
  3285. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
  3286. /* update phy mode for bar frame */
  3287. ppdu_desc->phy_mode =
  3288. ppdu_desc->user[i].preamble;
  3289. ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
  3290. break;
  3291. }
  3292. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
  3293. ppdu_desc->frame_ctrl =
  3294. ppdu_desc->user[i].frame_ctrl;
  3295. break;
  3296. }
  3297. }
  3298. }
  3299. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
  3300. ppdu_desc->delayed_ba) {
  3301. qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
  3302. for (i = 0; i < ppdu_desc->num_users; i++) {
  3303. struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
  3304. uint64_t start_tsf;
  3305. uint64_t end_tsf;
  3306. uint32_t ppdu_id;
  3307. struct dp_mon_peer *mon_peer;
  3308. ppdu_id = ppdu_desc->ppdu_id;
  3309. peer = dp_peer_get_ref_by_id
  3310. (pdev->soc, ppdu_desc->user[i].peer_id,
  3311. DP_MOD_ID_TX_PPDU_STATS);
  3312. /**
  3313. * This check is to make sure peer is not deleted
  3314. * after processing the TLVs.
  3315. */
  3316. if (!peer)
  3317. continue;
  3318. mon_peer = peer->monitor_peer;
  3319. delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
  3320. start_tsf = ppdu_desc->ppdu_start_timestamp;
  3321. end_tsf = ppdu_desc->ppdu_end_timestamp;
  3322. /**
  3323. * save delayed ba user info
  3324. */
  3325. if (ppdu_desc->user[i].delayed_ba) {
  3326. dp_peer_copy_delay_stats(peer,
  3327. &ppdu_desc->user[i],
  3328. ppdu_id);
  3329. mon_peer->last_delayed_ba_ppduid = ppdu_id;
  3330. delay_ppdu->ppdu_start_timestamp = start_tsf;
  3331. delay_ppdu->ppdu_end_timestamp = end_tsf;
  3332. }
  3333. ppdu_desc->user[i].peer_last_delayed_ba =
  3334. mon_peer->last_delayed_ba;
  3335. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  3336. if (ppdu_desc->user[i].delayed_ba &&
  3337. !ppdu_desc->user[i].debug_copied) {
  3338. QDF_TRACE(QDF_MODULE_ID_TXRX,
  3339. QDF_TRACE_LEVEL_INFO_MED,
  3340. "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
  3341. __func__, __LINE__,
  3342. ppdu_desc->ppdu_id,
  3343. ppdu_desc->bar_ppdu_id,
  3344. ppdu_desc->num_users,
  3345. i,
  3346. ppdu_desc->htt_frame_type);
  3347. }
  3348. }
  3349. }
  3350. /*
  3351. * when frame type is BAR and STATS_COMMON_TLV is set
3352. * copy the stored peer delayed info to the BAR status
  3353. */
  3354. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
  3355. for (i = 0; i < ppdu_desc->bar_num_users; i++) {
  3356. struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
  3357. uint64_t start_tsf;
  3358. uint64_t end_tsf;
  3359. struct dp_mon_peer *mon_peer;
  3360. peer = dp_peer_get_ref_by_id
  3361. (pdev->soc,
  3362. ppdu_desc->user[i].peer_id,
  3363. DP_MOD_ID_TX_PPDU_STATS);
  3364. /**
  3365. * This check is to make sure peer is not deleted
  3366. * after processing the TLVs.
  3367. */
  3368. if (!peer)
  3369. continue;
  3370. mon_peer = peer->monitor_peer;
  3371. if (ppdu_desc->user[i].completion_status !=
  3372. HTT_PPDU_STATS_USER_STATUS_OK) {
  3373. dp_peer_unref_delete(peer,
  3374. DP_MOD_ID_TX_PPDU_STATS);
  3375. continue;
  3376. }
  3377. delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
  3378. start_tsf = delay_ppdu->ppdu_start_timestamp;
  3379. end_tsf = delay_ppdu->ppdu_end_timestamp;
  3380. if (mon_peer->last_delayed_ba) {
  3381. dp_peer_copy_stats_to_bar(peer,
  3382. &ppdu_desc->user[i]);
  3383. ppdu_desc->ppdu_id =
  3384. mon_peer->last_delayed_ba_ppduid;
  3385. ppdu_desc->ppdu_start_timestamp = start_tsf;
  3386. ppdu_desc->ppdu_end_timestamp = end_tsf;
  3387. }
  3388. ppdu_desc->user[i].peer_last_delayed_ba =
  3389. mon_peer->last_delayed_ba;
  3390. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  3391. }
  3392. }
  3393. TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
  3394. mon_pdev->list_depth--;
  3395. TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
  3396. ppdu_info_list_elem);
  3397. mon_pdev->sched_comp_list_depth++;
  3398. }
  3399. /**
3400. * dp_validate_fix_ppdu_tlv(): Function to validate the length of a PPDU TLV
3401. *
3402. * If the TLV length sent as part of the PPDU TLV is less than the expected
3403. * size, i.e. the size of the corresponding data structure, pad the remaining
3404. * bytes with zeros and continue processing the TLVs
  3405. *
  3406. * @pdev: DP pdev handle
  3407. * @tag_buf: TLV buffer
  3408. * @tlv_expected_size: Expected size of Tag
  3409. * @tlv_len: TLV length received from FW
  3410. *
  3411. * Return: Pointer to updated TLV
  3412. */
  3413. static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
  3414. uint32_t *tag_buf,
  3415. uint16_t tlv_expected_size,
  3416. uint16_t tlv_len)
  3417. {
  3418. uint32_t *tlv_desc = tag_buf;
  3419. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  3420. qdf_assert_always(tlv_len != 0);
  3421. if (tlv_len < tlv_expected_size) {
  3422. qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
  3423. qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
  3424. tlv_desc = mon_pdev->ppdu_tlv_buf;
  3425. }
  3426. return tlv_desc;
  3427. }
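/*
 * Illustrative sketch (not part of the driver): padding a short TLV into
 * a scratch buffer so a handler can safely read the full expected
 * structure, mirroring what dp_validate_fix_ppdu_tlv() does with
 * mon_pdev->ppdu_tlv_buf. The scratch buffer and its size are hypothetical;
 * the caller is assumed to guarantee expected_len <= EX_MAX_TLV_SIZE.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define EX_MAX_TLV_SIZE 512

static uint8_t ex_scratch[EX_MAX_TLV_SIZE];

static const void *ex_fix_short_tlv(const void *tlv, uint16_t tlv_len,
				    uint16_t expected_len)
{
	/* Long-enough TLVs are used in place; short ones are copied and
	 * the remainder is zero-filled.
	 */
	if (tlv_len >= expected_len)
		return tlv;

	memset(ex_scratch, 0, expected_len);
	memcpy(ex_scratch, tlv, tlv_len);

	return ex_scratch;
}
#endif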
  3428. /**
  3429. * dp_process_ppdu_tag(): Function to process the PPDU TLVs
  3430. * @pdev: DP pdev handle
  3431. * @tag_buf: TLV buffer
  3432. * @tlv_len: length of tlv
  3433. * @ppdu_info: per ppdu tlv structure
  3434. *
  3435. * return: void
  3436. */
  3437. static void dp_process_ppdu_tag(struct dp_pdev *pdev,
  3438. uint32_t *tag_buf,
  3439. uint32_t tlv_len,
  3440. struct ppdu_info *ppdu_info)
  3441. {
  3442. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  3443. uint16_t tlv_expected_size;
  3444. uint32_t *tlv_desc;
  3445. switch (tlv_type) {
  3446. case HTT_PPDU_STATS_COMMON_TLV:
  3447. tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
  3448. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3449. tlv_expected_size, tlv_len);
  3450. dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
  3451. break;
  3452. case HTT_PPDU_STATS_USR_COMMON_TLV:
  3453. tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
  3454. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3455. tlv_expected_size, tlv_len);
  3456. dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
  3457. ppdu_info);
  3458. break;
  3459. case HTT_PPDU_STATS_USR_RATE_TLV:
  3460. tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
  3461. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3462. tlv_expected_size, tlv_len);
  3463. dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
  3464. ppdu_info);
  3465. break;
  3466. case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
  3467. tlv_expected_size =
  3468. sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
  3469. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3470. tlv_expected_size, tlv_len);
  3471. dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
  3472. pdev, tlv_desc, ppdu_info);
  3473. break;
  3474. case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
  3475. tlv_expected_size =
  3476. sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
  3477. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3478. tlv_expected_size, tlv_len);
  3479. dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
  3480. pdev, tlv_desc, ppdu_info);
  3481. break;
  3482. case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
  3483. tlv_expected_size =
  3484. sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
  3485. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3486. tlv_expected_size, tlv_len);
  3487. dp_process_ppdu_stats_user_cmpltn_common_tlv(
  3488. pdev, tlv_desc, ppdu_info);
  3489. break;
  3490. case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
  3491. tlv_expected_size =
  3492. sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
  3493. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3494. tlv_expected_size, tlv_len);
  3495. dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
  3496. pdev, tlv_desc, ppdu_info);
  3497. break;
  3498. case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
  3499. tlv_expected_size =
  3500. sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
  3501. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3502. tlv_expected_size, tlv_len);
  3503. dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
  3504. pdev, tlv_desc, ppdu_info);
  3505. break;
  3506. case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
  3507. tlv_expected_size =
  3508. sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
  3509. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3510. tlv_expected_size, tlv_len);
  3511. dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
  3512. pdev, tlv_desc, ppdu_info);
  3513. break;
  3514. case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
  3515. tlv_expected_size =
  3516. sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
  3517. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3518. tlv_expected_size, tlv_len);
  3519. dp_process_ppdu_stats_user_common_array_tlv(
  3520. pdev, tlv_desc, ppdu_info);
  3521. break;
  3522. case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
  3523. tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
  3524. tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
  3525. tlv_expected_size, tlv_len);
  3526. dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
  3527. ppdu_info);
  3528. break;
  3529. case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
  3530. dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
  3531. break;
  3532. default:
  3533. break;
  3534. }
  3535. }
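/*
 * Illustrative sketch (not part of the driver): walking a TLV stream and
 * dispatching on the tag, in the spirit of the switch above. The header
 * layout (tag/length bit positions) below is an assumption made for the
 * example; the driver instead relies on the HTT_STATS_TLV_* accessors.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define EX_TLV_TAG(hdr)   (((hdr) >> 0) & 0xfff)	/* assumed layout */
#define EX_TLV_LEN(hdr)   (((hdr) >> 12) & 0xfff)	/* payload bytes */
#define EX_TLV_HDR_BYTES  4

static void ex_walk_tlvs(const uint8_t *buf, uint32_t buf_len,
			 void (*handler)(uint16_t tag, const uint8_t *payload,
					 uint16_t len))
{
	uint32_t offset = 0;

	while (offset + EX_TLV_HDR_BYTES <= buf_len) {
		uint32_t hdr;
		uint16_t tag, len;

		memcpy(&hdr, buf + offset, sizeof(hdr));
		tag = EX_TLV_TAG(hdr);
		len = EX_TLV_LEN(hdr);

		if (offset + EX_TLV_HDR_BYTES + len > buf_len)
			break;	/* truncated TLV; stop walking */

		handler(tag, buf + offset + EX_TLV_HDR_BYTES, len);
		offset += EX_TLV_HDR_BYTES + len;
	}
}
#endif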
  3536. #ifdef WLAN_TELEMETRY_STATS_SUPPORT
  3537. static inline
  3538. void dp_ppdu_desc_user_airtime_consumption_update(
  3539. struct dp_peer *peer,
  3540. struct cdp_tx_completion_ppdu_user *user)
  3541. {
  3542. struct dp_mon_peer *mon_peer = NULL;
  3543. mon_peer = peer->monitor_peer;
  3544. if (qdf_unlikely(!mon_peer))
  3545. return;
  3546. DP_STATS_INC(mon_peer, airtime_consumption.consumption,
  3547. user->phy_tx_time_us);
  3548. }
  3549. #else
  3550. static inline
  3551. void dp_ppdu_desc_user_airtime_consumption_update(
  3552. struct dp_peer *peer,
  3553. struct cdp_tx_completion_ppdu_user *user)
  3554. { }
  3555. #endif
  3556. #if defined(WLAN_ATF_ENABLE) || defined(WLAN_TELEMETRY_STATS_SUPPORT)
  3557. static void
  3558. dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
  3559. struct dp_peer *peer,
  3560. struct cdp_tx_completion_ppdu *ppdu_desc,
  3561. struct cdp_tx_completion_ppdu_user *user)
  3562. {
  3563. uint32_t nss_ru_width_sum = 0;
  3564. struct dp_mon_peer *mon_peer = NULL;
  3565. if (!pdev || !ppdu_desc || !user || !peer)
  3566. return;
  3567. if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
  3568. return;
  3569. mon_peer = peer->monitor_peer;
  3570. if (qdf_unlikely(!mon_peer))
  3571. return;
  3572. nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
  3573. if (!nss_ru_width_sum)
  3574. nss_ru_width_sum = 1;
3575. /**
3576. * For a SU-MIMO PPDU, phy Tx time is the same for the single user.
3577. * For MU-MIMO, phy Tx time is calculated per user as below:
3578. * user phy tx time =
3579. * Entire PPDU duration * MU Ratio * OFDMA Ratio
3580. * MU Ratio = usr_nss / Sum_of_nss_of_all_users
3581. * OFDMA_ratio = usr_ru_width / Sum_of_ru_width_of_all_users
3582. * usr_ru_width = ru_end - ru_start + 1
3583. */
  3584. if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
  3585. user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
  3586. } else {
  3587. user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
  3588. user->nss * user->ru_tones) / nss_ru_width_sum;
  3589. }
  3590. dp_ppdu_desc_user_airtime_consumption_update(peer, user);
  3591. }
  3592. #else
  3593. static void
  3594. dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
  3595. struct dp_peer *peer,
  3596. struct cdp_tx_completion_ppdu *ppdu_desc,
  3597. struct cdp_tx_completion_ppdu_user *user)
  3598. {
  3599. }
  3600. #endif
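/*
 * Illustrative sketch (not part of the driver): the per-user airtime
 * apportionment used by dp_ppdu_desc_user_phy_tx_time_update() above.
 * Worked example: for a 1000 us MU PPDU with user0 (nss=2, ru_tones=106)
 * and user1 (nss=1, ru_tones=106), usr_nss_sum=3 and usr_ru_tones_sum=212,
 * so user0 gets 1000 * 2 * 106 / (3 * 212) ~= 333 us. Names below are
 * hypothetical.
 */
#if 0
#include <stdint.h>

static uint32_t ex_user_phy_tx_time(uint32_t ppdu_tx_time_us,
				    uint32_t user_nss, uint32_t user_ru_tones,
				    uint32_t nss_sum, uint32_t ru_tones_sum)
{
	uint64_t denom = (uint64_t)nss_sum * ru_tones_sum;

	if (!denom)
		denom = 1;	/* avoid divide-by-zero, as the driver does */

	return (uint32_t)(((uint64_t)ppdu_tx_time_us * user_nss *
			   user_ru_tones) / denom);
}
#endif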
  3601. /**
  3602. * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
  3603. * @pdev: DP pdev handle
  3604. * @ppdu_info: per PPDU TLV descriptor
  3605. *
  3606. * return: void
  3607. */
  3608. void
  3609. dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
  3610. struct ppdu_info *ppdu_info)
  3611. {
  3612. struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
  3613. struct dp_peer *peer = NULL;
  3614. uint32_t tlv_bitmap_expected;
  3615. uint32_t tlv_bitmap_default;
  3616. uint16_t i;
  3617. uint32_t num_users;
  3618. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  3619. ppdu_desc = (struct cdp_tx_completion_ppdu *)
  3620. qdf_nbuf_data(ppdu_info->nbuf);
  3621. if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
  3622. ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
  3623. tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
  3624. if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
  3625. mon_pdev->tx_capture_enabled) {
  3626. if (ppdu_info->is_ampdu)
  3627. tlv_bitmap_expected =
  3628. dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
  3629. ppdu_info->tlv_bitmap);
  3630. }
  3631. tlv_bitmap_default = tlv_bitmap_expected;
  3632. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
  3633. num_users = ppdu_desc->bar_num_users;
  3634. ppdu_desc->num_users = ppdu_desc->bar_num_users;
  3635. } else {
  3636. num_users = ppdu_desc->num_users;
  3637. }
  3638. qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
  3639. for (i = 0; i < num_users; i++) {
  3640. ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
  3641. ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;
  3642. peer = dp_peer_get_ref_by_id(pdev->soc,
  3643. ppdu_desc->user[i].peer_id,
  3644. DP_MOD_ID_TX_PPDU_STATS);
  3645. /**
  3646. * This check is to make sure peer is not deleted
  3647. * after processing the TLVs.
  3648. */
  3649. if (!peer)
  3650. continue;
  3651. ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
3652. /*
3653. * Different frame types (DATA, BAR or CTRL) have different
3654. * expected tlv bitmaps. Apart from the ACK_BA_STATUS TLV, we
3655. * receive the other tlvs in order/sequentially from fw.
3656. * Since the ACK_BA_STATUS TLV comes from hardware it is
3657. * asynchronous, so we need to depend on some tlv to confirm
3658. * that all tlvs have been received for a ppdu.
3659. * So we depend on both SCHED_CMD_STATUS_TLV and
3660. * ACK_BA_STATUS_TLV; for failed packets we won't get an
3661. * ACK_BA_STATUS_TLV.
3662. */
  3663. if (!(ppdu_info->tlv_bitmap &
  3664. (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
  3665. (!(ppdu_info->tlv_bitmap &
  3666. (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
  3667. (ppdu_desc->user[i].completion_status ==
  3668. HTT_PPDU_STATS_USER_STATUS_OK))) {
  3669. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  3670. continue;
  3671. }
  3672. /**
  3673. * Update tx stats for data frames having Qos as well as
  3674. * non-Qos data tid
  3675. */
  3676. if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
  3677. (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
  3678. (ppdu_desc->htt_frame_type ==
  3679. HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
  3680. ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
  3681. (ppdu_desc->num_mpdu > 1))) &&
  3682. (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
  3683. dp_tx_stats_update(pdev, peer,
  3684. &ppdu_desc->user[i],
  3685. ppdu_desc->ack_rssi);
  3686. }
  3687. dp_ppdu_desc_user_phy_tx_time_update(pdev, peer, ppdu_desc,
  3688. &ppdu_desc->user[i]);
  3689. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  3690. tlv_bitmap_expected = tlv_bitmap_default;
  3691. }
  3692. }
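/*
 * Illustrative sketch (not part of the driver): the per-user completeness
 * check applied in the loop above. Stats are updated only when the
 * scheduler-completion TLV has arrived and, for users whose completion
 * status is OK, the ACK/BA status TLV as well (failed users never get
 * one). The TLV bit positions below are hypothetical stand-ins for the
 * HTT_PPDU_STATS_* TLV types.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define EX_TLV_SCH_CMD_STATUS 20	/* assumed TLV type values */
#define EX_TLV_ACK_BA_STATUS  15

static bool ex_user_stats_ready(uint32_t tlv_bitmap, bool user_ok)
{
	if (!(tlv_bitmap & (1u << EX_TLV_SCH_CMD_STATUS)))
		return false;

	/* Only require the ACK/BA status TLV for successful users */
	if (user_ok && !(tlv_bitmap & (1u << EX_TLV_ACK_BA_STATUS)))
		return false;

	return true;
}
#endif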
  3693. #if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(QCA_MONITOR_2_0_SUPPORT)
  3694. /*
  3695. * dp_tx_ppdu_desc_notify() - Notify to upper layer about PPDU via WDI
  3696. *
  3697. * @pdev: Datapath pdev handle
  3698. * @nbuf: Buffer to be delivered to upper layer
  3699. *
  3700. * Return: void
  3701. */
  3702. static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
  3703. {
  3704. struct dp_soc *soc = pdev->soc;
  3705. struct dp_mon_ops *mon_ops = NULL;
  3706. mon_ops = dp_mon_ops_get(soc);
  3707. if (mon_ops && mon_ops->mon_ppdu_desc_notify)
  3708. mon_ops->mon_ppdu_desc_notify(pdev, nbuf);
  3709. else
  3710. qdf_nbuf_free(nbuf);
  3711. }
  3712. void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
  3713. struct ppdu_info *ppdu_info)
  3714. {
  3715. struct ppdu_info *s_ppdu_info = NULL;
  3716. struct ppdu_info *ppdu_info_next = NULL;
  3717. struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
  3718. qdf_nbuf_t nbuf;
  3719. uint32_t time_delta = 0;
  3720. bool starved = 0;
  3721. bool matched = 0;
  3722. bool recv_ack_ba_done = 0;
  3723. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  3724. if (ppdu_info->tlv_bitmap &
  3725. (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
  3726. ppdu_info->done)
  3727. recv_ack_ba_done = 1;
  3728. mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;
  3729. s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);
  3730. TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
  3731. ppdu_info_list_elem, ppdu_info_next) {
  3732. if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
  3733. time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
  3734. ppdu_info->tsf_l32;
  3735. else
  3736. time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
  3737. if (!s_ppdu_info->done && !recv_ack_ba_done) {
  3738. if (time_delta < MAX_SCHED_STARVE) {
  3739. dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
  3740. pdev->pdev_id,
  3741. s_ppdu_info->ppdu_id,
  3742. s_ppdu_info->sched_cmdid,
  3743. s_ppdu_info->tlv_bitmap,
  3744. s_ppdu_info->tsf_l32,
  3745. s_ppdu_info->done);
  3746. break;
  3747. }
  3748. starved = 1;
  3749. }
  3750. mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
  3751. TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
  3752. ppdu_info_list_elem);
  3753. mon_pdev->sched_comp_list_depth--;
  3754. nbuf = s_ppdu_info->nbuf;
  3755. qdf_assert_always(nbuf);
  3756. ppdu_desc = (struct cdp_tx_completion_ppdu *)
  3757. qdf_nbuf_data(nbuf);
  3758. ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;
  3759. if (starved) {
  3760. dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
  3761. ppdu_desc->frame_ctrl,
  3762. ppdu_desc->htt_frame_type,
  3763. ppdu_desc->tlv_bitmap,
  3764. ppdu_desc->user[0].completion_status);
  3765. starved = 0;
  3766. }
  3767. if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
  3768. ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
  3769. matched = 1;
  3770. dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);
  3771. qdf_mem_free(s_ppdu_info);
  3772. dp_tx_ppdu_desc_notify(pdev, nbuf);
  3773. if (matched)
  3774. break;
  3775. }
  3776. }
  3777. #endif
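/*
 * Illustrative sketch (not part of the driver): the 32-bit TSF delta
 * computation used above when matching scheduler-completion entries and
 * below when detecting stale ppdu_id nodes. The subtraction accounts for
 * wrap-around of the 32-bit TSF so a wrapped value still yields the
 * correct elapsed time. EX_MAX_TSF_32 is an assumed stand-in for
 * MAX_TSF_32.
 */
#if 0
#include <stdint.h>

#define EX_MAX_TSF_32 0xFFFFFFFFu

static uint32_t ex_tsf_delta(uint32_t old_tsf, uint32_t new_tsf)
{
	/* Handle the case where new_tsf wrapped past 2^32 */
	if (old_tsf > new_tsf)
		return (EX_MAX_TSF_32 - old_tsf) + new_tsf;

	return new_tsf - old_tsf;
}
#endif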
  3778. /*
  3779. * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer
  3780. *
  3781. * @pdev: Datapath pdev handle
  3782. * @ppdu_info: per PPDU TLV descriptor
  3783. *
  3784. * Return: void
  3785. */
  3786. static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev,
  3787. struct ppdu_info *ppdu_info)
  3788. {
  3789. struct dp_soc *soc = pdev->soc;
  3790. struct dp_mon_ops *mon_ops = NULL;
  3791. mon_ops = dp_mon_ops_get(soc);
  3792. if (mon_ops && mon_ops->mon_ppdu_desc_deliver) {
  3793. mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info);
  3794. } else {
  3795. qdf_nbuf_free(ppdu_info->nbuf);
  3796. ppdu_info->nbuf = NULL;
  3797. qdf_mem_free(ppdu_info);
  3798. }
  3799. }
  3800. /**
  3801. * dp_get_ppdu_desc(): Function to allocate new PPDU status
  3802. * desc for new ppdu id
  3803. * @pdev: DP pdev handle
  3804. * @ppdu_id: PPDU unique identifier
  3805. * @tlv_type: TLV type received
  3806. * @tsf_l32: timestamp received along with ppdu stats indication header
  3807. * @max_users: Maximum user for that particular ppdu
  3808. *
  3809. * return: ppdu_info per ppdu tlv structure
  3810. */
  3811. static
  3812. struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
  3813. uint8_t tlv_type, uint32_t tsf_l32,
  3814. uint8_t max_users)
  3815. {
  3816. struct ppdu_info *ppdu_info = NULL;
  3817. struct ppdu_info *s_ppdu_info = NULL;
  3818. struct ppdu_info *ppdu_info_next = NULL;
  3819. struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
  3820. uint32_t size = 0;
  3821. struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
  3822. struct cdp_tx_completion_ppdu_user *tmp_user;
  3823. uint32_t time_delta;
  3824. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  3825. /*
3826. * Check whether a node for this ppdu_id already exists
  3827. */
  3828. TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
  3829. ppdu_info_list_elem, ppdu_info_next) {
  3830. if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			if (ppdu_info->tsf_l32 > tsf_l32)
				time_delta = (MAX_TSF_32 -
					      ppdu_info->tsf_l32) + tsf_l32;
			else
				time_delta = tsf_l32 - ppdu_info->tsf_l32;
			if (time_delta > WRAP_DROP_TSF_DELTA) {
				TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
					     ppdu_info, ppdu_info_list_elem);
				mon_pdev->list_depth--;
				pdev->stats.ppdu_wrap_drop++;
				tmp_ppdu_desc =
					(struct cdp_tx_completion_ppdu *)
					qdf_nbuf_data(ppdu_info->nbuf);
				tmp_user = &tmp_ppdu_desc->user[0];
				dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
						     ppdu_info->ppdu_id,
						     ppdu_info->tsf_l32,
						     ppdu_info->tlv_bitmap,
						     tmp_user->completion_status,
						     ppdu_info->compltn_common_tlv,
						     ppdu_info->ack_ba_tlv,
						     ppdu_id, tsf_l32,
						     tlv_type);
				qdf_nbuf_free(ppdu_info->nbuf);
				ppdu_info->nbuf = NULL;
				qdf_mem_free(ppdu_info);
			} else {
				break;
			}
		}
	}
	/*
	 * If this is an ACK BA TLV and no matching node was found in the
	 * ppdu_info list, check the sched completion ppdu list as well.
	 */
	if (!ppdu_info &&
	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
		TAILQ_FOREACH(s_ppdu_info,
			      &mon_pdev->sched_comp_ppdu_list,
			      ppdu_info_list_elem) {
			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
				if (s_ppdu_info->tsf_l32 > tsf_l32)
					time_delta = (MAX_TSF_32 -
						      s_ppdu_info->tsf_l32) +
						      tsf_l32;
				else
					time_delta = tsf_l32 -
						     s_ppdu_info->tsf_l32;
				if (time_delta < WRAP_DROP_TSF_DELTA) {
					ppdu_info = s_ppdu_info;
					break;
				}
			} else {
				/*
				 * ACK BA STATUS TLVs arrive in sequential
				 * order. If we receive the ACK BA STATUS TLV
				 * for a later ppdu while an earlier ppdu is
				 * still waiting for its ACK BA STATUS TLV,
				 * then, per FW behaviour, that TLV will not
				 * arrive later, so the earlier ppdu_info can
				 * be marked done.
				 */
				if (s_ppdu_info)
					s_ppdu_info->done = 1;
			}
		}
	}
	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/*
			 * If we get a tlv_type that has already been processed
			 * for this ppdu, it means a new ppdu arrived with the
			 * same ppdu id, so flush the older one. For MU-MIMO
			 * and OFDMA a single PPDU carries multiple users with
			 * the same TLV types; the tlv bitmap is used to tell
			 * SU apart from MU-MIMO/OFDMA.
			 */
			if (!(ppdu_info->tlv_bitmap &
			      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;
			ppdu_desc = (struct cdp_tx_completion_ppdu *)
					qdf_nbuf_data(ppdu_info->nbuf);
			/*
			 * Apart from the ACK BA STATUS TLV, all TLVs arrive
			 * in order, so if the tlv type is not ACK BA STATUS
			 * we can deliver ppdu_info.
			 */
			if ((tlv_type ==
			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
			    ((ppdu_desc->htt_frame_type ==
			      HTT_STATS_FTYPE_SGEN_MU_BAR) ||
			     (ppdu_desc->htt_frame_type ==
			      HTT_STATS_FTYPE_SGEN_BE_MU_BAR)))
				return ppdu_info;
			dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			return ppdu_info;
		}
	}
	/*
	 * Flush the head ppdu descriptor if the ppdu desc list reaches the
	 * max threshold.
	 */
	if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		pdev->stats.ppdu_drop++;
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		qdf_mem_free(ppdu_info);
	}
	size = sizeof(struct cdp_tx_completion_ppdu) +
	       (max_users * sizeof(struct cdp_tx_completion_ppdu_user));
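	/*
	 * The nbuf allocated below is laid out as one
	 * struct cdp_tx_completion_ppdu immediately followed by max_users
	 * struct cdp_tx_completion_ppdu_user entries; max_users is recorded
	 * in the descriptor further below.
	 */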
	/*
	 * Allocate a new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;
	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
					 0, 4, TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}
	ppdu_info->ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
	if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) {
		dp_mon_err("No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}
	ppdu_info->ppdu_desc->max_users = max_users;
	ppdu_info->tsf_l32 = tsf_l32;
	/*
	 * No lock is needed because all PPDU TLVs are processed in the
	 * same context and this list is updated in the same context.
	 */
	TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
			  ppdu_info_list_elem);
	mon_pdev->list_depth++;
	return ppdu_info;
}
/**
 * dp_htt_process_tlv(): Function to process each PPDU TLV
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Return: ppdu_info per ppdu tlv structure
 */
static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	uint8_t max_users = CDP_MU_MAX_USERS;
	uint32_t tsf_l32;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
	msg_word = msg_word + 1;
	tsf_l32 = (uint32_t)(*msg_word);
	msg_word = msg_word + 2;
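	/*
	 * The indication header is consumed word by word above: payload
	 * length from the first word, ppdu_id from the second, tsf_l32 from
	 * the third; msg_word then skips one more word (presumably the upper
	 * TSF word) so that it points at the first TLV.
	 */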
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;
		if (tlv_length == 0)
			break;
		tlv_length += HTT_TLV_HDR_LEN;
		/*
		 * No separate ppdu descriptor is allocated for the MGMT
		 * payload TLV, since it is sent as a separate WDI indication
		 * and does not carry any ppdu information.
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
				(*(msg_word + 1));
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}
		/*
		 * Retrieve max_users if this is the USERS_INFO TLV; use 1 for
		 * the COMPLTN_FLUSH TLV; otherwise keep CDP_MU_MAX_USERS.
		 */
		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
			max_users =
				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
			max_users = 1;
		}
		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
					     tsf_l32, max_users);
		if (!ppdu_info)
			return NULL;
		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);
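		/*
		 * tlv_bitmap accumulates one bit per TLV type seen for this
		 * PPDU and is used further below to decide whether the PPDU
		 * can be considered complete.
		 */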
		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);
		/*
		 * Increment the pdev level tlv count to monitor
		 * missing TLVs.
		 */
		mon_pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}
	if (!ppdu_info)
		return NULL;
	mon_pdev->last_ppdu_id = ppdu_id;
	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
	    mon_pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}
	ppdu_desc = ppdu_info->ppdu_desc;
	if (!ppdu_desc)
		return NULL;
	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
	    HTT_PPDU_STATS_USER_STATUS_OK) {
		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
	}
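	/*
	 * When the last user's completion status is not OK, only the low
	 * eight bits of tlv_bitmap_expected are kept, i.e. only TLV types
	 * 0-7 remain expected for this PPDU.
	 */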
	/*
	 * For frame types DATA and BAR, stats are updated per MSDU.
	 * Successful msdu and mpdu counts are populated from the ACK BA
	 * STATUS TLV, which arrives out of order; successful mpdu is also
	 * populated from the COMPLTN COMMON TLV, which arrives in order.
	 * For every ppdu_info we store the successful mpdu count from both
	 * TLVs and compare them before delivering, to make sure the ACK BA
	 * STATUS TLV was received. Some self-generated frames never get an
	 * ACK BA STATUS TLV, so there is no need to wait for it in that case.
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * Most of the time a BAR frame will have a duplicate ACK BA
		 * STATUS TLV.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
			return NULL;
		/*
		 * For data frames, the COMPLTN COMMON TLV count should match
		 * the ACK BA STATUS TLV count and the completion status.
		 * Only the first user is checked because, for OFDMA, the
		 * completion is seen at the next MU BAR frame, while for
		 * MIMO only the first user's completion is immediate.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
		    (ppdu_desc->user[0].completion_status == 0 &&
		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
			return NULL;
	}
	/*
	 * Once all the TLVs for a given PPDU have been processed, return the
	 * PPDU status so it can be delivered to the higher layer.
	 * tlv_bitmap_expected cannot be relied upon for every frame type, but
	 * the SCHED CMD STATUS TLV is the last TLV sent by the FW for a ppdu;
	 * apart from the ACK BA TLV, the FW sends the other TLVs in
	 * sequential order, and the flush TLV comes separately.
	 */
	if ((ppdu_info->tlv_bitmap != 0 &&
	     (ppdu_info->tlv_bitmap &
	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
	    (ppdu_info->tlv_bitmap &
	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
		ppdu_info->done = 1;
		return ppdu_info;
	}
	return NULL;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_tx_ppdu_stats_feat_enable_check() - Check if feature(s) is enabled to
 *			consume stats received from FW via HTT
 * @pdev: Datapath pdev handle
 *
 * Return: true if at least one consuming feature is enabled, else false
 */
static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;
	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check)
		return mon_ops->mon_ppdu_stats_feat_enable_check(pdev);
	else
		return false;
}
#endif
#if defined(WDI_EVENT_ENABLE)
#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
 * @soc: DP SOC handle
 * @pdev_id: pdev id
 * @htt_t2h_msg: HTT message nbuf
 *
 * Return: true if the nbuf should be freed by the caller, else false
 */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev;
	struct ppdu_info *ppdu_info = NULL;
	bool free_buf = true;
	struct dp_mon_pdev *mon_pdev;
	if (pdev_id >= MAX_PDEV_CNT)
		return true;
	pdev = soc->pdev_list[pdev_id];
	if (!pdev)
		return true;
	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev)
		return true;
	if (!dp_tx_ppdu_stats_feat_enable_check(pdev))
		return free_buf;
	qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);
	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
	if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
		    (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
		    QDF_STATUS_SUCCESS)
			free_buf = false;
	}
	if (ppdu_info)
		dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
	mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
	mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
	mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;
	qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);
	return free_buf;
}
#elif (!defined(REMOVE_PKT_LOG))
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#endif
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/**
 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 * @htt_t2h_msg: HTT msg nbuf
 *
 * Return: True if the buffer should be freed by the caller.
 */
bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
			  uint32_t *msg_word,
			  qdf_nbuf_t htt_t2h_msg)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;
	bool free_buf;
	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							  target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);
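	/*
	 * The raw indication is first exposed through the WDI_EVENT_LITE_T2H
	 * event above and only then handed to the PPDU stats handler below,
	 * whose return value tells the caller whether it may free
	 * htt_t2h_msg.
	 */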
	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
					      htt_t2h_msg);
	return free_buf;
}
#endif
void
dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
	pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
}
bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
		return true;
	return false;
}
bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
		return true;
	return false;
}
bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
	    (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
		if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
		    (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
			return true;
		}
	}
	return false;
}
QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
{
	int target_type;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct cdp_mon_ops *cdp_ops;
	cdp_ops = dp_mon_cdp_ops_get(soc);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		/* do nothing */
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) {
			if (cdp_ops && cdp_ops->config_full_mon_mode)
				cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1);
		}
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9224:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		mon_soc->monitor_mode_v2 = 1;
		break;
	default:
		dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
	dp_mon_info("hw_nac_monitor_support = %d",
		    mon_soc->hw_nac_monitor_support);
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration
 * @pdev: PDEV handle [Should be valid]
 *
 * Return: None
 */
static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	int target_type;
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		mon_pdev->is_tlv_hdr_64_bit = true;
		break;
	default:
		mon_pdev->is_tlv_hdr_64_bit = false;
		break;
	}
}
QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops;
	qdf_size_t mon_pdev_context_size;
	if (!pdev) {
		dp_mon_err("pdev is NULL");
		goto fail0;
	}
	soc = pdev->soc;
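	/*
	 * The monitor pdev context size is queried through arch_ops rather
	 * than taken as sizeof(struct dp_mon_pdev), presumably so that an
	 * architecture-specific implementation can append its own private
	 * fields to the allocation.
	 */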
	mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV);
	mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size);
	if (!mon_pdev) {
		dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
		goto fail0;
	}
	pdev->monitor_pdev = mon_pdev;
	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("%pK: Invalid monitor ops", pdev);
		goto fail1;
	}
	if (mon_ops->mon_pdev_alloc) {
		if (mon_ops->mon_pdev_alloc(pdev)) {
			dp_mon_err("%pK: MONITOR pdev alloc failed", pdev);
			goto fail1;
		}
	}
	if (mon_ops->mon_rings_alloc) {
		if (mon_ops->mon_rings_alloc(pdev)) {
			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
			goto fail2;
		}
	}
	/* Rx monitor mode specific init */
	if (mon_ops->rx_mon_desc_pool_alloc) {
		if (mon_ops->rx_mon_desc_pool_alloc(pdev)) {
			dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
			goto fail3;
		}
	}
	pdev->monitor_pdev = mon_pdev;
	dp_mon_pdev_per_target_config(pdev);
	return QDF_STATUS_SUCCESS;
fail3:
	if (mon_ops->mon_rings_free)
		mon_ops->mon_rings_free(pdev);
fail2:
	if (mon_ops->mon_pdev_free)
		mon_ops->mon_pdev_free(pdev);
fail1:
	pdev->monitor_pdev = NULL;
	qdf_mem_free(mon_pdev);
fail0:
	return QDF_STATUS_E_NOMEM;
}
QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops = NULL;
	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}
	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev) {
		dp_mon_err("Monitor pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}
	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return QDF_STATUS_E_FAILURE;
	}
	if (mon_ops->rx_mon_desc_pool_free)
		mon_ops->rx_mon_desc_pool_free(pdev);
	if (mon_ops->mon_rings_free)
		mon_ops->mon_rings_free(pdev);
	if (mon_ops->mon_pdev_free)
		mon_ops->mon_pdev_free(pdev);
	qdf_mem_free(mon_pdev);
	pdev->monitor_pdev = NULL;
	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops = NULL;
	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}
	soc = pdev->soc;
	mon_pdev = pdev->monitor_pdev;
	mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer));
	if (!mon_pdev->invalid_mon_peer) {
		dp_mon_err("%pK: Memory allocation failed for invalid "
			   "monitor peer", pdev);
		return QDF_STATUS_E_NOMEM;
	}
	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		goto fail0;
	}
	mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
	if (!mon_pdev->filter) {
		dp_mon_err("%pK: Memory allocation failed for monitor filter",
			   pdev);
		goto fail0;
	}
	if (mon_ops->tx_mon_filter_alloc) {
		if (mon_ops->tx_mon_filter_alloc(pdev)) {
			dp_mon_err("%pK: Memory allocation failed for tx monitor "
				   "filter", pdev);
			goto fail1;
		}
	}
	qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
	qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
	mon_pdev->monitor_configured = false;
	mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;
	TAILQ_INIT(&mon_pdev->neighbour_peers_list);
	mon_pdev->neighbour_peers_added = false;
	mon_pdev->monitor_configured = false;
	/* Monitor filter init */
	mon_pdev->mon_filter_mode = MON_FILTER_ALL;
	mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->fp_data_filter = FILTER_DATA_ALL;
	mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->mo_data_filter = FILTER_DATA_ALL;
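	/*
	 * Both filter sets default to "pass everything" for mgmt, ctrl and
	 * data until a specific monitor/capture mode reprograms them; fp_*
	 * and mo_* are presumably the filter-pass and monitor-other filter
	 * categories used by the monitor ring setup.
	 */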
	/*
	 * Initialize the ppdu tlv lists.
	 */
	TAILQ_INIT(&mon_pdev->ppdu_info_list);
	TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
	mon_pdev->list_depth = 0;
	mon_pdev->tlv_count = 0;
	/* initialize cal client timer */
	dp_cal_client_attach(&mon_pdev->cal_client_ctx,
			     dp_pdev_to_cdp_pdev(pdev),
			     pdev->soc->osdev,
			     &dp_iterate_update_peer_list);
	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
		goto fail2;
	if (mon_ops->mon_lite_mon_alloc) {
		if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) {
			dp_mon_err("%pK: lite mon alloc failed", pdev);
			goto fail3;
		}
	}
	if (mon_ops->mon_rings_init) {
		if (mon_ops->mon_rings_init(pdev)) {
			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
			goto fail4;
		}
	}
	/* initialize sw monitor rx descriptors */
	if (mon_ops->rx_mon_desc_pool_init)
		mon_ops->rx_mon_desc_pool_init(pdev);
	/* allocate buffers and replenish the monitor RxDMA ring */
	if (mon_ops->rx_mon_buffers_alloc) {
		if (mon_ops->rx_mon_buffers_alloc(pdev)) {
			dp_mon_err("%pK: rx mon buffers alloc failed", pdev);
			goto fail5;
		}
	}
	/* attach monitor function */
	dp_monitor_tx_ppdu_stats_attach(pdev);
	/* mon pdev extended init */
	if (mon_ops->mon_pdev_ext_init)
		mon_ops->mon_pdev_ext_init(pdev);
	mon_pdev->is_dp_mon_pdev_initialized = true;
	return QDF_STATUS_SUCCESS;
fail5:
	if (mon_ops->rx_mon_desc_pool_deinit)
		mon_ops->rx_mon_desc_pool_deinit(pdev);
	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
fail4:
	if (mon_ops->mon_lite_mon_dealloc)
		mon_ops->mon_lite_mon_dealloc(pdev);
fail3:
	dp_htt_ppdu_stats_detach(pdev);
fail2:
	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	if (mon_ops->tx_mon_filter_dealloc)
		mon_ops->tx_mon_filter_dealloc(pdev);
fail1:
	dp_mon_filter_dealloc(mon_pdev);
fail0:
	qdf_mem_free(mon_pdev->invalid_mon_peer);
	return QDF_STATUS_E_FAILURE;
}
QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops = NULL;
	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return QDF_STATUS_E_FAILURE;
	}
	if (!mon_pdev->is_dp_mon_pdev_initialized)
		return QDF_STATUS_SUCCESS;
	dp_mon_filters_reset(pdev);
	/* mon pdev extended deinit */
	if (mon_ops->mon_pdev_ext_deinit)
		mon_ops->mon_pdev_ext_deinit(pdev);
	/* detach monitor function */
	dp_monitor_tx_ppdu_stats_detach(pdev);
	if (mon_ops->rx_mon_buffers_free)
		mon_ops->rx_mon_buffers_free(pdev);
	if (mon_ops->rx_mon_desc_pool_deinit)
		mon_ops->rx_mon_desc_pool_deinit(pdev);
	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
	dp_cal_client_detach(&mon_pdev->cal_client_ctx);
	if (mon_ops->mon_lite_mon_dealloc)
		mon_ops->mon_lite_mon_dealloc(pdev);
	dp_htt_ppdu_stats_detach(pdev);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	dp_neighbour_peers_detach(pdev);
	dp_pktlogmod_exit(pdev);
	if (mon_ops->tx_mon_filter_dealloc)
		mon_ops->tx_mon_filter_dealloc(pdev);
	if (mon_pdev->filter)
		dp_mon_filter_dealloc(mon_pdev);
	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
	if (mon_pdev->invalid_mon_peer)
		qdf_mem_free(mon_pdev->invalid_mon_peer);
	mon_pdev->is_dp_mon_pdev_initialized = false;
	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
	if (!mon_vdev) {
		dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
		return QDF_STATUS_E_NOMEM;
	}
	if (pdev && pdev->monitor_pdev &&
	    pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_attach(mon_vdev);
	vdev->monitor_vdev = mon_vdev;
	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops)
		return QDF_STATUS_E_FAILURE;
	if (!mon_vdev)
		return QDF_STATUS_E_FAILURE;
	if (pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_detach(mon_vdev);
	qdf_mem_free(mon_vdev);
	vdev->monitor_vdev = NULL;
	/* set mvdev to NULL only if detach is called for the monitor/special vap */
	if (pdev->monitor_pdev->mvdev == vdev)
		pdev->monitor_pdev->mvdev = NULL;
	if (mon_ops->mon_lite_mon_vdev_delete)
		mon_ops->mon_lite_mon_vdev_delete(pdev, vdev);
	return QDF_STATUS_SUCCESS;
}
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_mon_peer_attach_notify() - Raise WDI event for peer create
 * @peer: DP Peer handle
 *
 * Return: none
 */
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_peer_cookie peer_cookie;
	pdev = peer->vdev->pdev;
	soc = pdev->soc;
	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	peer_cookie.ctx = NULL;
	peer_cookie.pdev_id = pdev->pdev_id;
	peer_cookie.cookie = pdev->next_peer_cookie++;
	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc,
			     (void *)&peer_cookie,
			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
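	/*
	 * The WDI_EVENT_PEER_CREATE subscriber is expected to fill
	 * peer_cookie.ctx with a rate-stats context. If it is still NULL on
	 * return, the cookie allocation is rolled back below and no
	 * peerstats context is attached to the monitor peer.
	 */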
	if (soc->peerstats_enabled) {
		if (!peer_cookie.ctx) {
			pdev->next_peer_cookie--;
			qdf_err("Failed to initialize peer rate stats");
			mon_peer->peerstats_ctx = NULL;
		} else {
			mon_peer->peerstats_ctx =
				(struct cdp_peer_rate_stats_ctx *)
				 peer_cookie.ctx;
		}
	}
}
/**
 * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy
 * @peer: DP Peer handle
 *
 * Return: none
 */
static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_peer_cookie peer_cookie;
	pdev = peer->vdev->pdev;
	soc = pdev->soc;
	/* send peer destroy event to upper layer */
	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	peer_cookie.ctx = NULL;
	peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->peerstats_ctx;
	dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
			     soc,
			     (void *)&peer_cookie,
			     peer->peer_id,
			     WDI_NO_VAL,
			     pdev->pdev_id);
	mon_peer->peerstats_ctx = NULL;
}
#else
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
	peer->monitor_peer->peerstats_ctx = NULL;
}
static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
	peer->monitor_peer->peerstats_ctx = NULL;
}
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer;
	struct dp_pdev *pdev;
	mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
	if (!mon_peer) {
		dp_mon_err("%pK: MONITOR peer allocation failed", peer);
		return QDF_STATUS_E_NOMEM;
	}
	peer->monitor_peer = mon_peer;
	pdev = peer->vdev->pdev;
	/*
	 * In tx_monitor mode, a filter may be set for a peer before it is
	 * associated; once the peer becomes associated, the tx_cap_enabled
	 * flag must be updated to keep supporting per-peer filtering.
	 */
	dp_monitor_peer_tx_capture_filter_check(pdev, peer);
	DP_STATS_INIT(mon_peer);
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
	dp_mon_peer_attach_notify(peer);
	return QDF_STATUS_SUCCESS;
}
#endif
QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return QDF_STATUS_SUCCESS;
	dp_mon_peer_detach_notify(peer);
	qdf_mem_free(mon_peer);
	peer->monitor_peer = NULL;
	return QDF_STATUS_SUCCESS;
}
#ifndef DISABLE_MON_CONFIG
void dp_mon_register_intr_ops(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = NULL;
	mon_ops = dp_mon_ops_get(soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return;
	}
	if (mon_ops->mon_register_intr_ops)
		mon_ops->mon_register_intr_ops(soc);
}
#endif
struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_peerstats_ctx(struct
							      dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	if (mon_peer)
		return mon_peer->peerstats_ctx;
	else
		return NULL;
}
#ifdef QCA_ENHANCED_STATS_SUPPORT
void dp_mon_peer_reset_stats(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = NULL;
	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return;
	DP_STATS_CLR(mon_peer);
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
}
void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
			   enum cdp_stat_update_type type)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_mon_peer_stats *mon_peer_stats;
	if (!mon_peer || !arg)
		return;
	mon_peer_stats = &mon_peer->stats;
	switch (type) {
	case UPDATE_PEER_STATS:
	{
		struct cdp_peer_stats *peer_stats =
				(struct cdp_peer_stats *)arg;
		DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
		break;
	}
	case UPDATE_VDEV_STATS:
	{
		struct cdp_vdev_stats *vdev_stats =
				(struct cdp_vdev_stats *)arg;
		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
		break;
	}
	default:
		dp_mon_err("Invalid stats_update_type");
	}
}
void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_mon_peer *mon_peer;
	struct dp_mon_peer_stats *mon_peer_stats;
	struct cdp_pdev_stats *pdev_stats;
	if (!pdev || !pdev->monitor_pdev)
		return;
	mon_peer = pdev->monitor_pdev->invalid_mon_peer;
	if (!mon_peer)
		return;
	mon_peer_stats = &mon_peer->stats;
	pdev_stats = &pdev->stats;
	DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats);
}
QDF_STATUS
dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type,
			    cdp_peer_stats_param_t *buf)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_mon_peer *mon_peer;
	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return QDF_STATUS_E_FAILURE;
	switch (type) {
	case cdp_peer_tx_rate:
		buf->tx_rate = mon_peer->stats.tx.tx_rate;
		break;
	case cdp_peer_tx_last_tx_rate:
		buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate;
		break;
	case cdp_peer_tx_ratecode:
		buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode;
		break;
	case cdp_peer_rx_rate:
		buf->rx_rate = mon_peer->stats.rx.rx_rate;
		break;
	case cdp_peer_rx_last_rx_rate:
		buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate;
		break;
	case cdp_peer_rx_ratecode:
		buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode;
		break;
	case cdp_peer_rx_avg_snr:
		buf->rx_avg_snr = mon_peer->stats.rx.avg_snr;
		break;
	case cdp_peer_rx_snr:
		buf->rx_snr = mon_peer->stats.rx.snr;
		break;
	default:
		dp_err("Invalid stats type requested");
		ret = QDF_STATUS_E_FAILURE;
	}
	return ret;
}
#endif
void dp_mon_ops_register(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	uint32_t target_type;
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_ops_register_1_0(mon_soc);
		break;
	case TARGET_TYPE_QCN9224:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_ops_register_2_0(mon_soc);
#endif
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
}
#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
void dp_mon_ops_free(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;
	if (cdp_mon_ops)
		qdf_mem_free(cdp_mon_ops);
	if (mon_ops)
		qdf_mem_free(mon_ops);
}
#else
void dp_mon_ops_free(struct dp_soc *soc)
{
}
#endif
void dp_mon_cdp_ops_register(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	uint32_t target_type;
	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_cdp_ops_register_1_0(ops);
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
			dp_config_for_nac_rssi;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
			dp_vdev_get_neighbour_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
			dp_update_filter_neighbour_peers;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
		dp_cfr_filter_register_1_0(ops);
#endif
		break;
	case TARGET_TYPE_QCN9224:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_cdp_ops_register_2_0(ops);
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
			dp_lite_mon_config_nac_rssi_peer;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
			dp_lite_mon_get_nac_peer_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
			dp_lite_mon_config_nac_peer;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
		dp_cfr_filter_register_2_0(ops);
#endif
#endif /* QCA_MONITOR_2_0_SUPPORT */
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
		dp_get_mon_vdev_from_pdev_wifi3;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
#endif
	ops->ctrl_ops->enable_peer_based_pktlog =
		dp_enable_peer_based_pktlog;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
		dp_peer_update_pkt_capture_params;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef QCA_ENHANCED_STATS_SUPPORT
	ops->host_stats_ops->txrx_enable_enhanced_stats =
		dp_enable_enhanced_stats;
	ops->host_stats_ops->txrx_disable_enhanced_stats =
		dp_disable_enhanced_stats;
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
#endif
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
		dp_get_scan_spcl_vap_stats;
#endif
	return;
}
#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	if (ops->mon_ops) {
		qdf_mem_free(ops->mon_ops);
		ops->mon_ops = NULL;
	}
}
#else
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	ops->mon_ops = NULL;
}
#endif
void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}
	dp_mon_cdp_mon_ops_deregister(ops);
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	ops->cfr_ops->txrx_cfr_filter = NULL;
#endif
	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = NULL;
	ops->misc_ops->pkt_log_con_service = NULL;
	ops->misc_ops->pkt_log_exit = NULL;
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL;
	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef FEATURE_PERPKT_INFO
	ops->host_stats_ops->txrx_enable_enhanced_stats = NULL;
	ops->host_stats_ops->txrx_disable_enhanced_stats = NULL;
#endif /* FEATURE_PERPKT_INFO */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = NULL;
#endif
	return;
}
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
static inline
void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler = NULL;
}
#else
static inline
void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
{
}
#endif
#ifdef QCA_RSSI_DB2DBM
/**
 * dp_mon_compute_min_nf() - calculate the minimum noise floor value across
 *		the active chain's 20MHZ sub-bands.
 * Computation: nfInDbm[][] is reduced with A_MIN() over nfHwDbm[][],
 *		treating the row index as the active chain and the column
 *		index as the 20MHZ sub-band of that chain.
 * Example: chain_mask = 0x07 (three active chains, indices 0, 1, 2);
 *		BandWidth = 40MHZ (two 20MHZ sub-bands, so sub-band indices
 *		0 and 1 are considered when computing min_nf).
 *
 * @conv_params: cdp_rssi_dbm_conv_param_dp structure value
 * @min_nf: location where the computed minimum noise floor is stored
 * @chain_idx: active chain index in the nfHwdbm array
 *
 * Return: QDF_STATUS_SUCCESS if the value was computed successfully
 *	   QDF_STATUS_E_INVAL on error
 */
static QDF_STATUS
dp_mon_compute_min_nf(struct cdp_rssi_dbm_conv_param_dp *conv_params,
		      int8_t *min_nf, int chain_idx)
{
	int j;
	*min_nf = conv_params->nf_hw_dbm[chain_idx][0];
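	/*
	 * The first 20MHZ sub-band seeds *min_nf; for wider bandwidths the
	 * remaining sub-bands of this chain are scanned below (up to
	 * SUB40BW/SUB80BW/SUB160BW/SUB320BW entries respectively) and
	 * *min_nf is lowered whenever a smaller noise-floor value is found.
	 */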
	switch (conv_params->curr_bw) {
	case CHAN_WIDTH_20:
	case CHAN_WIDTH_5:
	case CHAN_WIDTH_10:
		break;
	case CHAN_WIDTH_40:
		for (j = 1; j < SUB40BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_80:
		for (j = 1; j < SUB80BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_160:
	case CHAN_WIDTH_80P80:
	case CHAN_WIDTH_165:
		for (j = 1; j < SUB160BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_160P160:
	case CHAN_WIDTH_320:
		for (j = 1; j < SUB320BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	default:
		dp_cdp_err("Invalid bandwidth %u", conv_params->curr_bw);
		return QDF_STATUS_E_INVAL;
	}
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_mon_pdev_params_rssi_dbm_conv() - set the RSSI-to-dBm conversion
 *		params in the monitor pdev.
 * @cdp_soc: dp soc handle.
 * @params: cdp_rssi_db2dbm_param_dp structure value.
 *
 * Return: QDF_STATUS_SUCCESS if the values were set successfully
 *	   QDF_STATUS_E_INVAL on error
 */
QDF_STATUS
dp_mon_pdev_params_rssi_dbm_conv(struct cdp_soc_t *cdp_soc,
				 struct cdp_rssi_db2dbm_param_dp *params)
{
	struct cdp_rssi_db2dbm_param_dp *dp_rssi_params = params;
	uint8_t pdev_id = params->pdev_id;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;
	struct cdp_rssi_temp_off_param_dp temp_off_param;
	struct cdp_rssi_dbm_conv_param_dp conv_params;
	int8_t min_nf = 0;
	int i;
	if (!soc->features.rssi_dbm_conv_support) {
		dp_cdp_err("rssi dbm conversion support is false");
		return QDF_STATUS_E_INVAL;
	}
	if (!pdev || !pdev->monitor_pdev) {
		dp_cdp_err("Invalid pdev_id %u", pdev_id);
		return QDF_STATUS_E_FAILURE;
	}
	mon_pdev = pdev->monitor_pdev;
	if (dp_rssi_params->rssi_temp_off_present) {
		temp_off_param = dp_rssi_params->temp_off_param;
		mon_pdev->ppdu_info.rx_status.rssi_temp_offset =
			temp_off_param.rssi_temp_offset;
	}
	if (dp_rssi_params->rssi_dbm_info_present) {
		conv_params = dp_rssi_params->rssi_dbm_param;
		for (i = 0; i < CDP_MAX_NUM_ANTENNA; i++) {
			if (conv_params.curr_rx_chainmask & (0x01 << i)) {
				if (QDF_STATUS_E_INVAL == dp_mon_compute_min_nf
					(&conv_params, &min_nf, i))
					return QDF_STATUS_E_INVAL;
			} else {
				continue;
			}
		}
		mon_pdev->ppdu_info.rx_status.xlna_bypass_offset =
			conv_params.xlna_bypass_offset;
		mon_pdev->ppdu_info.rx_status.xlna_bypass_threshold =
			conv_params.xlna_bypass_threshold;
		mon_pdev->ppdu_info.rx_status.xbar_config =
			conv_params.xbar_config;
		mon_pdev->ppdu_info.rx_status.min_nf_dbm = min_nf;
		mon_pdev->ppdu_info.rx_status.rssi_dbm_conv_support =
			soc->features.rssi_dbm_conv_support;
	}
	return QDF_STATUS_SUCCESS;
}
#endif
void dp_mon_intr_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	mon_soc->mon_rx_process = NULL;
	dp_mon_ppdu_stats_handler_deregister(mon_soc);
}
void dp_mon_feature_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);
	if (!mon_ops) {
		dp_err("mon_ops is NULL");
		return;
	}
	mon_ops->mon_config_debug_sniffer = NULL;
	mon_ops->mon_peer_tx_init = NULL;
	mon_ops->mon_peer_tx_cleanup = NULL;
	mon_ops->mon_htt_ppdu_stats_attach = NULL;
	mon_ops->mon_htt_ppdu_stats_detach = NULL;
	mon_ops->mon_print_pdev_rx_mon_stats = NULL;
	mon_ops->mon_set_bsscolor = NULL;
	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
	mon_ops->mon_pdev_get_filter_non_data = NULL;
	mon_ops->mon_neighbour_peer_add_ast = NULL;
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_ppdu_stats_attach = NULL;
	mon_ops->mon_tx_ppdu_stats_detach = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_peer_tx_capture_filter_check = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = NULL;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = NULL;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = NULL;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = NULL;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = NULL;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode = NULL;
	mon_ops->mon_filter_reset_mcopy_mode = NULL;
	mon_ops->mon_mcopy_check_deliver = NULL;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats = NULL;
	mon_ops->mon_tx_enable_enhanced_stats = NULL;
	mon_ops->mon_tx_disable_enhanced_stats = NULL;
	mon_ops->mon_ppdu_desc_deliver = NULL;
	mon_ops->mon_ppdu_desc_notify = NULL;
	mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
#ifdef WLAN_FEATURE_11BE
	mon_ops->mon_tx_stats_update = NULL;
#endif
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	mon_ops->mon_filter_setup_smart_monitor = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
#ifdef BE_PKTLOG_SUPPORT
	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
#endif
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = NULL;
#endif
	mon_ops->rx_hdr_length_set = NULL;
	mon_ops->rx_packet_length_set = NULL;
	mon_ops->rx_wmask_subscribe = NULL;
	mon_ops->rx_enable_mpdu_logging = NULL;
	mon_ops->mon_neighbour_peers_detach = NULL;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
	mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = NULL;
	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
	mon_ops->mon_rx_populate_ppdu_info = NULL;
#endif
}
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;
	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}
	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
	if (!mon_soc) {
		dp_mon_err("%pK: mem allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	/* register monitor ops */
	soc->monitor_soc = mon_soc;
	dp_mon_ops_register(soc);
	dp_mon_register_intr_ops(soc);
	dp_mon_cdp_ops_register(soc);
	dp_mon_register_feature_ops(soc);
	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;
	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}
	mon_soc = soc->monitor_soc;
	dp_monitor_vdev_timer_deinit(soc);
	dp_mon_cdp_ops_deregister(soc);
	soc->monitor_soc = NULL;
	qdf_mem_free(mon_soc);
	return QDF_STATUS_SUCCESS;
}