/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include "htt_ppdu_stats.h"
#include "dp_cal_client_api.h"
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
#define HTT_SHIFT_UPPER_TIMESTAMP 32
#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
#define DP_INTR_POLL_TIMER_MS 5
#define INVALID_FREE_BUFF 0xffffffff

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#endif

#if defined(QCA_MONITOR_PKT_SUPPORT) || defined(QCA_MCOPY_SUPPORT)
static QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
                                                 uint8_t delayed_replenish);
#endif

#ifndef WLAN_TX_PKT_CAPTURE_ENH
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
                                           void *data,
                                           uint32_t ppdu_id,
                                           uint32_t size)
{
}
#endif

#if !defined(DISABLE_MON_CONFIG)
#ifdef QCA_MONITOR_PKT_SUPPORT
static void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
{
    struct dp_soc *soc = pdev->soc;

    if (soc->wlan_cfg_ctx->rxdma1_enable) {
        dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
                       RXDMA_MONITOR_BUF, 0);
        dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
                       RXDMA_MONITOR_DST, 0);
        dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
                       RXDMA_MONITOR_DESC, 0);
    }
}
#else
static void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
{
}
#endif

/**
 * dp_mon_rings_deinit() - Deinitialize monitor rings
 * @pdev: DP pdev handle
 *
 * Return: None
 */
static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
    int mac_id = 0;
    struct dp_soc *soc = pdev->soc;
    struct dp_mon_soc *mon_soc;

    mon_soc = soc->monitor_soc;
    if (!mon_soc) {
        dp_mon_err("%pK: monitor SOC not initialized", soc);
        return;
    }

    if (mon_soc->monitor_mode_v2)
        return;

    for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
        int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                 pdev->pdev_id);

        dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
                       RXDMA_MONITOR_STATUS, 0);
        dp_mon_dest_rings_deinit(pdev, lmac_id);
    }
}

#ifdef QCA_MONITOR_PKT_SUPPORT
static void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
{
    struct dp_soc *soc = pdev->soc;

    if (soc->wlan_cfg_ctx->rxdma1_enable) {
        dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
        dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
        dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
    }
}
#else
static void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
{
}
#endif

/**
 * dp_mon_rings_free() - free monitor rings
 * @pdev: Datapath pdev handle
 *
 * Return: None
 */
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
    int mac_id = 0;
    struct dp_soc *soc = pdev->soc;
    struct dp_mon_soc *mon_soc;

    mon_soc = soc->monitor_soc;
    if (!mon_soc) {
        dp_mon_err("%pK: monitor SOC not initialized", soc);
        return;
    }

    if (mon_soc->monitor_mode_v2)
        return;

    for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
        int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                 pdev->pdev_id);

        dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
        dp_mon_dest_rings_free(pdev, lmac_id);
    }
}

#ifdef QCA_MONITOR_PKT_SUPPORT
static
QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
{
    struct dp_soc *soc = pdev->soc;

    if (soc->wlan_cfg_ctx->rxdma1_enable) {
        if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
                         RXDMA_MONITOR_BUF, 0, lmac_id)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ",
                       soc);
            goto fail1;
        }

        if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
                         RXDMA_MONITOR_DST, 0, lmac_id)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
            goto fail1;
        }

        if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
                         RXDMA_MONITOR_DESC, 0, lmac_id)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring",
                       soc);
            goto fail1;
        }
    }
    return QDF_STATUS_SUCCESS;

fail1:
    return QDF_STATUS_E_NOMEM;
}
#else
static
QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
{
    return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_mon_rings_init() - Initialize monitor srng rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_NOMEM on failure
 */
static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
    int mac_id = 0;
    struct dp_mon_soc *mon_soc;

    mon_soc = soc->monitor_soc;
    if (!mon_soc) {
        dp_mon_err("%pK: monitor SOC not initialized", soc);
        return QDF_STATUS_SUCCESS;
    }

    if (mon_soc->monitor_mode_v2)
        return QDF_STATUS_SUCCESS;

    for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
        int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                 pdev->pdev_id);

        if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
                         RXDMA_MONITOR_STATUS, 0, lmac_id)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
                       soc);
            goto fail1;
        }

        if (dp_mon_dest_rings_init(pdev, lmac_id))
            goto fail1;
    }
    return QDF_STATUS_SUCCESS;

fail1:
    dp_mon_rings_deinit(pdev);
    return QDF_STATUS_E_NOMEM;
}

#ifdef QCA_MONITOR_PKT_SUPPORT
static
QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
{
    int entries;
    struct dp_soc *soc = pdev->soc;
    struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx = pdev->wlan_cfg_ctx;

    if (soc->wlan_cfg_ctx->rxdma1_enable) {
        entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
        if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
                          RXDMA_MONITOR_BUF, entries, 0)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ",
                       soc);
            goto fail1;
        }

        entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
        if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
                          RXDMA_MONITOR_DST, entries, 0)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
            goto fail1;
        }

        entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
        if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
                          RXDMA_MONITOR_DESC, entries, 0)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring",
                       soc);
            goto fail1;
        }
    }
    return QDF_STATUS_SUCCESS;

fail1:
    return QDF_STATUS_E_NOMEM;
}
#else
static
QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
{
    return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_NOMEM on failure
 */
static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
    int mac_id = 0;
    int entries;
    struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
    struct dp_mon_soc *mon_soc;

    mon_soc = soc->monitor_soc;
    if (!mon_soc) {
        dp_mon_err("%pK: monitor SOC not initialized", soc);
        return QDF_STATUS_SUCCESS;
    }

    if (mon_soc->monitor_mode_v2)
        return QDF_STATUS_SUCCESS;

    pdev_cfg_ctx = pdev->wlan_cfg_ctx;

    for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
        int lmac_id =
            dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);

        entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
        if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
                          RXDMA_MONITOR_STATUS, entries, 0)) {
            dp_mon_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
                       soc);
            goto fail1;
        }

        if (dp_mon_dest_rings_alloc(pdev, lmac_id))
            goto fail1;
    }
    return QDF_STATUS_SUCCESS;

fail1:
    dp_mon_rings_free(pdev);
    return QDF_STATUS_E_NOMEM;
}
#else
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
}

static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
}

static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
    return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
    return QDF_STATUS_SUCCESS;
}
#endif

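/*
 * Usage note (editorial sketch, not from the upstream file): the four
 * entry points above are expected to be driven in pairs from the pdev
 * attach/detach paths -- dp_mon_rings_alloc()/dp_mon_rings_free() for
 * memory, dp_mon_rings_init()/dp_mon_rings_deinit() for SRNG setup:
 *
 *	if (dp_mon_rings_alloc(soc, pdev) == QDF_STATUS_SUCCESS &&
 *	    dp_mon_rings_init(soc, pdev) == QDF_STATUS_SUCCESS) {
 *		// ... monitor rings usable here ...
 *		dp_mon_rings_deinit(pdev);
 *		dp_mon_rings_free(pdev);
 *	}
 *
 * The pairing is implied by the failure paths above: dp_mon_rings_init()
 * unwinds with dp_mon_rings_deinit(), and dp_mon_rings_alloc() with
 * dp_mon_rings_free().
 */
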
#ifdef QCA_SUPPORT_FULL_MON
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
                        uint8_t val)
{
    struct dp_soc *soc = (struct dp_soc *)soc_handle;
    struct dp_mon_soc *mon_soc = soc->monitor_soc;

    mon_soc->full_mon_mode = val;
    dp_cdp_err("Configure full monitor mode val: %d ", val);

    return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
                        uint8_t val)
{
    return QDF_STATUS_SUCCESS;
}
#endif

#ifdef QCA_SUPPORT_FULL_MON
static inline QDF_STATUS
dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
{
    struct dp_soc *soc = pdev->soc;
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_soc *mon_soc = soc->monitor_soc;

    if (!mon_soc->full_mon_mode)
        return QDF_STATUS_SUCCESS;

    if ((htt_h2t_full_mon_cfg(soc->htt_handle,
                              pdev->pdev_id,
                              val)) != QDF_STATUS_SUCCESS) {
        status = QDF_STATUS_E_FAILURE;
    }

    return status;
}
#else
static inline QDF_STATUS
dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
{
    return QDF_STATUS_SUCCESS;
}
#endif

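/*
 * Example sketch (not part of the upstream file): enabling full monitor
 * mode end to end. The mode is first latched on the monitor SOC via the
 * cdp-facing dp_config_full_mon_mode(); only then does the per-pdev
 * dp_soc_config_full_mon_mode() push the HTT config to firmware (it
 * early-returns when full_mon_mode is unset). The caller plumbing and
 * the guard macro below are assumptions for illustration.
 */
#ifdef DP_MON_USAGE_EXAMPLES	/* illustrative only; never defined in real builds */
static QDF_STATUS dp_example_enable_full_mon(struct cdp_soc_t *cdp_soc,
                                             struct dp_pdev *pdev)
{
    /* Latch the mode on the monitor SOC ... */
    dp_config_full_mon_mode(cdp_soc, 1);
    /* ... then push the per-pdev config to firmware. */
    return dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
}
#endif /* DP_MON_USAGE_EXAMPLES */
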
#ifdef QCA_MCOPY_SUPPORT
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    mon_pdev->mcopy_mode = M_COPY_DISABLED;
    mon_pdev->monitor_configured = false;
    mon_pdev->mvdev = NULL;
}

static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    if (mon_pdev->mcopy_mode) {
        dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
        dp_pdev_disable_mcopy_code(pdev);
        dp_mon_filter_reset_mcopy_mode(pdev);
        status = dp_mon_filter_update(pdev);
        if (status != QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Failed to reset M copy mode filters"));
        }
        mon_pdev->monitor_configured = false;
    }
}

static QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    if (mon_pdev->mvdev)
        return QDF_STATUS_E_RESOURCES;

    mon_pdev->mcopy_mode = val;
    mon_pdev->tx_sniffer_enable = 0;
    mon_pdev->monitor_configured = true;

    if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx))
        dp_vdev_set_monitor_mode_rings(pdev, true);

    /*
     * Setup the M copy mode filter.
     */
    dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
    dp_mon_filter_setup_mcopy_mode(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Failed to set M_copy mode filters"));
        dp_mon_filter_reset_mcopy_mode(pdev);
        dp_pdev_disable_mcopy_code(pdev);
        return status;
    }

    if (!mon_pdev->pktlog_ppdu_stats)
        dp_h2t_cfg_stats_msg_send(pdev,
                                  DP_PPDU_STATS_CFG_SNIFFER,
                                  pdev->pdev_id);

    return status;
}
#else
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
    return QDF_STATUS_E_INVAL;
}
#endif /* QCA_MCOPY_SUPPORT */

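/*
 * Usage note (sketch based on dp_config_debug_sniffer() below): M-copy is
 * enabled with dp_config_mcopy_mode(pdev, val), where val is 2 or 4, and
 * torn down with dp_reset_mcopy_mode(). Both paths keep the full-monitor
 * HTT config and the mcopy filters in sync, as seen above.
 */
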
/**
 * dp_reset_monitor_mode() - Disable monitor mode
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath PDEV handle
 * @special_monitor: Flag to denote smart/special (lite) monitor mode
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
                                 uint8_t pdev_id,
                                 uint8_t special_monitor)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                                           pdev_id);
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev;

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    mon_pdev = pdev->monitor_pdev;

    qdf_spin_lock_bh(&mon_pdev->mon_lock);

    dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
    mon_pdev->mvdev = NULL;
    mon_pdev->monitor_configured = false;

    /*
     * Lite monitor mode, smart monitor mode and monitor
     * mode all use this API to reset the filters and disable the mode.
     */
    if (mon_pdev->mcopy_mode) {
#if defined(QCA_MCOPY_SUPPORT)
        dp_pdev_disable_mcopy_code(pdev);
        dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* QCA_MCOPY_SUPPORT */
    } else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
        dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
    } else {
        dp_mon_filter_reset_mon_mode(pdev);
    }

    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
                           soc);
    }

    qdf_spin_unlock_bh(&mon_pdev->mon_lock);
    return QDF_STATUS_SUCCESS;
}

/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @soc_hdl: soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
#ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
static QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                   struct cdp_monitor_filter *filter_val)
{
    /* Many monitor VAPs can exist in a system, but only one can be up at
     * any time.
     */
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_vdev *vdev;
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                                           pdev_id);
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev;

    if (!pdev || !pdev->monitor_pdev)
        return QDF_STATUS_E_FAILURE;

    mon_pdev = pdev->monitor_pdev;
    vdev = mon_pdev->mvdev;

    if (!vdev)
        return QDF_STATUS_E_FAILURE;

    QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
              "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
              pdev, pdev_id, soc, vdev);

    /* Check if the current pdev's monitor_vdev exists */
    if (!mon_pdev->mvdev) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "vdev=%pK", vdev);
        qdf_assert(vdev);
    }

    /* update filter mode, type in pdev structure */
    mon_pdev->mon_filter_mode = filter_val->mode;
    mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
    mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
    mon_pdev->fp_data_filter = filter_val->fp_data;
    mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
    mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
    mon_pdev->mo_data_filter = filter_val->mo_data;

    dp_mon_filter_setup_mon_mode(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
                           soc);
        dp_mon_filter_reset_mon_mode(pdev);
    }

    return status;
}
#else
static QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                   struct cdp_monitor_filter *filter_val)
{
    return QDF_STATUS_E_INVAL;
}
#endif

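/*
 * Illustrative sketch (assumed caller, not from the upstream file): the
 * advance filter is programmed by filling the cdp_monitor_filter fields
 * consumed above; the values are placeholders.
 *
 *	struct cdp_monitor_filter filter = {0};
 *
 *	filter.mode = ...;	// monitor filter mode
 *	filter.fp_mgmt = ...;	// filter-pass mgmt frames
 *	filter.fp_ctrl = ...;	// filter-pass ctrl frames
 *	filter.fp_data = ...;	// filter-pass data frames
 *	filter.mo_mgmt = ...;	// monitor-other mgmt frames
 *	filter.mo_ctrl = ...;	// monitor-other ctrl frames
 *	filter.mo_data = ...;	// monitor-other data frames
 *	dp_pdev_set_advance_monitor_filter(soc_hdl, pdev_id, &filter);
 */
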
/**
 * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
 * @cdp_soc: data path soc handle
 * @pdev_id: pdev_id
 * @nbuf: Management frame buffer
 */
static QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
                                           pdev_id);

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    dp_deliver_mgmt_frm(pdev, nbuf);

    return QDF_STATUS_SUCCESS;
}

#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: 0 on success, not 0 on failure
 */
static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
    mon_vdev->scan_spcl_vap_stats =
        qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));

    if (!mon_vdev->scan_spcl_vap_stats) {
        dp_mon_err("scan spcl vap stats attach fail");
        return QDF_STATUS_E_NOMEM;
    }

    return QDF_STATUS_SUCCESS;
}

/**
 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: void
 */
static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
    if (mon_vdev->scan_spcl_vap_stats) {
        qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
        mon_vdev->scan_spcl_vap_stats = NULL;
    }
}

/**
 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
 * @vdev: Datapath VDEV handle
 *
 * Return: void
 */
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
    struct dp_mon_vdev *mon_vdev;
    struct dp_mon_pdev *mon_pdev;

    mon_pdev = vdev->pdev->monitor_pdev;
    if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
        return;

    mon_vdev = vdev->monitor_vdev;
    if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
        return;

    qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
                 sizeof(struct cdp_scan_spcl_vap_stats));
}

/**
 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @stats: structure to hold spcl vap stats
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                           struct cdp_scan_spcl_vap_stats *stats)
{
    struct dp_mon_vdev *mon_vdev = NULL;
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                 DP_MOD_ID_CDP);

    if (!vdev || !stats) {
        if (vdev)
            dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
        return QDF_STATUS_E_INVAL;
    }

    mon_vdev = vdev->monitor_vdev;
    if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
        return QDF_STATUS_E_INVAL;
    }

    qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
                 sizeof(struct cdp_scan_spcl_vap_stats));

    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
    return QDF_STATUS_SUCCESS;
}
#else
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
}

static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
    return QDF_STATUS_SUCCESS;
}

static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
}
#endif

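/*
 * Lifecycle sketch for the scan special-vap stats above (assumption: the
 * monitor vdev attach/detach path drives these in this order):
 *
 *	dp_scan_spcl_vap_stats_attach(mon_vdev);	// on vdev attach
 *	dp_reset_scan_spcl_vap_stats(vdev);		// before a new scan
 *	dp_get_scan_spcl_vap_stats(soc_hdl, vdev_id, &stats); // reader copy-out
 *	dp_scan_spcl_vap_stats_detach(mon_vdev);	// on vdev detach
 */
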
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @dp_soc: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
                                           uint8_t vdev_id,
                                           uint8_t special_monitor)
{
    struct dp_soc *soc = (struct dp_soc *)dp_soc;
    struct dp_pdev *pdev;
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                 DP_MOD_ID_CDP);
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev;

    if (!vdev)
        return QDF_STATUS_E_FAILURE;

    pdev = vdev->pdev;

    if (!pdev || !pdev->monitor_pdev)
        return QDF_STATUS_E_FAILURE;

    mon_pdev = pdev->monitor_pdev;

    mon_pdev->mvdev = vdev;

    QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
              "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
              pdev, pdev->pdev_id, pdev->soc, vdev);

    /*
     * Do not configure the monitor buf ring and filter for smart and
     * lite monitor:
     * for smart monitor, filters are added along with the first NAC;
     * for lite monitor, the required configuration is done through
     * dp_set_pdev_param.
     */
    if (special_monitor) {
        status = QDF_STATUS_SUCCESS;
        goto fail;
    }

    if (mon_pdev->scan_spcl_vap_configured)
        dp_reset_scan_spcl_vap_stats(vdev);

    /* Check if the current pdev already has a monitor vdev configured */
    if (mon_pdev->monitor_configured) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                  "monitor vap already created vdev=%pK\n", vdev);
        status = QDF_STATUS_E_RESOURCES;
        goto fail;
    }

    mon_pdev->monitor_configured = true;

    dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
    dp_mon_filter_setup_mon_mode(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        dp_cdp_err("%pK: Failed to set monitor mode filters", soc);
        dp_mon_filter_reset_mon_mode(pdev);
        mon_pdev->monitor_configured = false;
        mon_pdev->mvdev = NULL;
    }

fail:
    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
    return status;
}

#ifdef QCA_TX_CAPTURE_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    mon_pdev->tx_sniffer_enable = 1;
    mon_pdev->monitor_configured = false;

    if (!mon_pdev->pktlog_ppdu_stats)
        dp_h2t_cfg_stats_msg_send(pdev,
                                  DP_PPDU_STATS_CFG_SNIFFER,
                                  pdev->pdev_id);

    return QDF_STATUS_SUCCESS;
}
#else
#ifdef QCA_MCOPY_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
    return QDF_STATUS_E_INVAL;
}
#endif
#endif

/*
 * dp_config_debug_sniffer() - API to enable/disable debug sniffer
 * @pdev: DP_PDEV handle
 * @val: user provided value
 *
 * Return: 0 for success, nonzero for failure
 */
#if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
static QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    /*
     * Note: The mirror copy mode cannot co-exist with any other
     * monitor modes. Hence disabling the filter for this mode will
     * reset the monitor destination ring filters.
     */
    dp_reset_mcopy_mode(pdev);
    switch (val) {
    case 0:
        mon_pdev->tx_sniffer_enable = 0;
        mon_pdev->monitor_configured = false;

        /*
         * We don't need to reset the Rx monitor status ring or call
         * the API dp_ppdu_ring_reset() if all debug sniffer modes are
         * disabled. The Rx monitor status ring will be disabled when
         * the last mode using the monitor status ring gets disabled.
         */
        if (!mon_pdev->pktlog_ppdu_stats &&
            !mon_pdev->enhanced_stats_en &&
            !mon_pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
        } else if (mon_pdev->enhanced_stats_en &&
                   !mon_pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_ENH_STATS,
                                      pdev->pdev_id);
        } else if (!mon_pdev->enhanced_stats_en &&
                   mon_pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_BPR_ENH,
                                      pdev->pdev_id);
        } else {
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_BPR,
                                      pdev->pdev_id);
        }
        break;
    case 1:
        status = dp_config_tx_capture_mode(pdev);
        break;
    case 2:
    case 4:
        status = dp_config_mcopy_mode(pdev, val);
        break;
    default:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Invalid value, mode not supported");
        status = QDF_STATUS_E_INVAL;
        break;
    }
    return status;
}
#else
static QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
    return QDF_STATUS_E_INVAL;
}
#endif

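/*
 * Summary of the 'val' encoding handled by dp_config_debug_sniffer()
 * above, as implemented in its switch statement:
 *	0    - disable all sniffer modes and restore the PPDU stats config
 *	1    - enable tx capture mode
 *	2, 4 - enable the M-copy variants
 * Any other value is rejected with QDF_STATUS_E_INVAL.
 */
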
static void dp_flush_monitor_rings(struct dp_soc *soc)
{
    struct dp_pdev *pdev = soc->pdev_list[0];
    hal_soc_handle_t hal_soc = soc->hal_soc;
    uint32_t lmac_id;
    uint32_t hp, tp;
    uint8_t dp_intr_id;
    int budget;
    void *mon_dst_srng;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    /* Reset monitor filters before reaping the ring */
    qdf_spin_lock_bh(&mon_pdev->mon_lock);
    dp_mon_filter_reset_mon_mode(pdev);
    if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS)
        dp_mon_info("failed to reset monitor filters");
    qdf_spin_unlock_bh(&mon_pdev->mon_lock);

    if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN)
        return;

    lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
    if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID))
        return;

    dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
    mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id);

    /* reap full ring */
    budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx);

    hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
    dp_mon_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp);

    dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget);

    hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
    dp_mon_info("After reap: Monitor DST ring HP %u TP %u", hp, tp);
}

#if !defined(DISABLE_MON_CONFIG)
#ifdef QCA_MONITOR_PKT_SUPPORT
static
QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
                                      struct dp_pdev *pdev,
                                      int mac_id,
                                      int mac_for_pdev)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;

    if (soc->wlan_cfg_ctx->rxdma1_enable) {
        status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
                                soc->rxdma_mon_buf_ring[mac_id]
                                .hal_srng,
                                RXDMA_MONITOR_BUF);
        if (status != QDF_STATUS_SUCCESS) {
            dp_mon_err("Failed to send htt srng setup message for Rxdma mon buf ring");
            return status;
        }

        status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
                                soc->rxdma_mon_dst_ring[mac_id]
                                .hal_srng,
                                RXDMA_MONITOR_DST);
        if (status != QDF_STATUS_SUCCESS) {
            dp_mon_err("Failed to send htt srng setup message for Rxdma mon dst ring");
            return status;
        }

        status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
                                soc->rxdma_mon_desc_ring[mac_id]
                                .hal_srng,
                                RXDMA_MONITOR_DESC);
        if (status != QDF_STATUS_SUCCESS) {
            dp_mon_err("Failed to send htt srng message for Rxdma mon desc ring");
            return status;
        }
    }

    return status;
}
#else
static
QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
                                      struct dp_pdev *pdev,
                                      int mac_id,
                                      int mac_for_pdev)
{
    return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
 * @soc: soc handle
 * @pdev: physical device handle
 * @mac_id: ring number
 * @mac_for_pdev: mac_id
 *
 * Return: non-zero for failure, zero for success
 */
#ifdef QCA_HOST2FW_RXBUF_RING
QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
                                 struct dp_pdev *pdev,
                                 int mac_id,
                                 int mac_for_pdev)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;

    status = dp_mon_htt_dest_srng_setup(soc, pdev, mac_id, mac_for_pdev);
    if (status != QDF_STATUS_SUCCESS)
        return status;

    status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
                            soc->rxdma_mon_status_ring[mac_id]
                            .hal_srng,
                            RXDMA_MONITOR_STATUS);
    if (status != QDF_STATUS_SUCCESS) {
        dp_mon_err("Failed to send htt srng setup message for Rxdma mon status ring");
        return status;
    }

    return status;
}
#else
/* This is only for WIN */
QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
                                 struct dp_pdev *pdev,
                                 int mac_id,
                                 int mac_for_pdev)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_soc *mon_soc;

    mon_soc = soc->monitor_soc;
    if (!mon_soc) {
        dp_mon_err("%pK: monitor SOC not initialized", soc);
        return status;
    }

    if (mon_soc->monitor_mode_v2)
        return status;

    if (wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
        status = dp_mon_htt_dest_srng_setup(soc, pdev,
                                            mac_id, mac_for_pdev);
        if (status != QDF_STATUS_SUCCESS)
            return status;
    }

    status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
                            soc->rxdma_mon_status_ring[mac_id]
                            .hal_srng,
                            RXDMA_MONITOR_STATUS);
    if (status != QDF_STATUS_SUCCESS) {
        dp_mon_err("Failed to send htt srng setup msg for Rxdma mon status ring");
        return status;
    }

    return status;
}
#endif
#else
QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
                                 struct dp_pdev *pdev,
                                 int mac_id,
                                 int mac_for_pdev)
{
    return QDF_STATUS_SUCCESS;
}
#endif

/* MCL specific functions */
#if defined(DP_CON_MON)
/*
 * dp_service_mon_rings() - service monitor rings
 * @soc: soc dp handle
 * @quota: number of ring entries that can be serviced
 *
 * Return: None
 */
static void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota)
{
    int ring = 0, work_done;
    struct dp_pdev *pdev = NULL;

    for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
        pdev = dp_get_pdev_for_lmac_id(soc, ring);
        if (!pdev)
            continue;

        work_done = dp_mon_process(soc, NULL, ring, quota);

        dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings",
                             work_done);
    }
}
#endif

  981. /**
  982. * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
  983. * ring based on target
  984. * @soc: soc handle
  985. * @mac_for_pdev: WIN- pdev_id, MCL- mac id
  986. * @pdev: physical device handle
  987. * @ring_num: mac id
  988. * @htt_tlv_filter: tlv filter
  989. *
  990. * Return: zero on success, non-zero on failure
  991. */
  992. static inline QDF_STATUS
  993. dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
  994. struct dp_pdev *pdev, uint8_t ring_num,
  995. struct htt_rx_ring_tlv_filter htt_tlv_filter)
  996. {
  997. QDF_STATUS status;
  998. if (soc->wlan_cfg_ctx->rxdma1_enable)
  999. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  1000. soc->rxdma_mon_buf_ring[ring_num]
  1001. .hal_srng,
  1002. RXDMA_MONITOR_BUF,
  1003. RX_MONITOR_BUFFER_SIZE,
  1004. &htt_tlv_filter);
  1005. else
  1006. status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  1007. pdev->rx_mac_buf_ring[ring_num]
  1008. .hal_srng,
  1009. RXDMA_BUF, RX_DATA_BUFFER_SIZE,
  1010. &htt_tlv_filter);
  1011. return status;
  1012. }
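/*
 * Illustrative caller sketch (hypothetical values): build a TLV filter
 * and push it for one mac ring. The field names below come from
 * struct htt_rx_ring_tlv_filter; which bits to set depends on the
 * monitor mode being configured.
 *
 *	struct htt_rx_ring_tlv_filter tlv_filter = {0};
 *
 *	tlv_filter.mpdu_start = 1;
 *	tlv_filter.msdu_start = 1;
 *	tlv_filter.packet = 1;
 *	tlv_filter.msdu_end = 1;
 *	tlv_filter.mpdu_end = 1;
 *	tlv_filter.attention = 1;
 *	status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
 *					     pdev, mac_id, tlv_filter);
 */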
  1013. /*
  1014. * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
  1015. * @soc_hdl: datapath soc handle
  1016. * @pdev_id: physical device instance id
  1017. *
1018. * Return: vdev id of the monitor vdev, or -EINVAL if monitor mode is not configured
  1019. */
  1020. static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
  1021. uint8_t pdev_id)
  1022. {
  1023. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1024. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1025. if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
  1026. !pdev->monitor_pdev->mvdev))
  1027. return -EINVAL;
  1028. return pdev->monitor_pdev->mvdev->vdev_id;
  1029. }
  1030. /*
1031. * dp_peer_tx_init() - Initialize transmit TID state
  1032. * @pdev: Datapath pdev
  1033. * @peer: Datapath peer
  1034. *
  1035. */
  1036. static void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
  1037. {
  1038. dp_peer_tid_queue_init(peer);
  1039. dp_peer_update_80211_hdr(peer->vdev, peer);
  1040. }
  1041. /*
1042. * dp_peer_tx_cleanup() - Deinitialize transmit TID state
  1043. * @vdev: Datapath vdev
  1044. * @peer: Datapath peer
  1045. *
  1046. */
  1047. static inline void
  1048. dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
  1049. {
  1050. dp_peer_tid_queue_cleanup(peer);
  1051. }
  1052. #if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
  1053. #ifndef WLAN_TX_PKT_CAPTURE_ENH
  1054. /*
1055. * dp_deliver_mgmt_frm: Deliver a Tx management frame to upper layers or free it
  1056. * @pdev: DP PDEV handle
  1057. * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
  1058. *
  1059. * return: void
  1060. */
  1061. void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
  1062. {
  1063. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1064. if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
  1065. dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
  1066. nbuf, HTT_INVALID_PEER,
  1067. WDI_NO_VAL, pdev->pdev_id);
  1068. } else {
  1069. if (!mon_pdev->bpr_enable)
  1070. qdf_nbuf_free(nbuf);
  1071. }
  1072. }
  1073. #endif
  1074. #endif
  1075. #ifdef QCA_ENHANCED_STATS_SUPPORT
  1076. /*
  1077. * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
  1078. * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
  1079. * @pdev: DP PDEV handle
  1080. * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
1081. * @ppdu_id: PPDU id of the PPDU carrying this TLV
1082. *
1083. * return: QDF_STATUS_SUCCESS if the nbuf has to be freed by the caller,
* QDF_STATUS_E_ALREADY if the nbuf was consumed and delivered
  1084. */
  1085. QDF_STATUS
  1086. dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
  1087. qdf_nbuf_t tag_buf,
  1088. uint32_t ppdu_id)
  1089. {
  1090. uint32_t *nbuf_ptr;
  1091. uint8_t trim_size;
  1092. size_t head_size;
  1093. struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
  1094. uint32_t *msg_word;
  1095. uint32_t tsf_hdr;
  1096. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1097. if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
  1098. (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
  1099. return QDF_STATUS_SUCCESS;
  1100. /*
  1101. * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
  1102. */
  1103. msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
  1104. msg_word = msg_word + 2;
  1105. tsf_hdr = *msg_word;
  1106. trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
  1107. HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
  1108. qdf_nbuf_data(tag_buf));
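/*
 * Pull the head so the nbuf starts at the mgmt frame payload:
 * mgmt_buf points inside tag_buf's data, so the pointer difference
 * (plus the reserved TLV header length) is the byte count to strip.
 */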
  1109. if (!qdf_nbuf_pull_head(tag_buf, trim_size))
  1110. return QDF_STATUS_SUCCESS;
  1111. qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
  1112. mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);
  1113. if (mon_pdev->tx_capture_enabled) {
  1114. head_size = sizeof(struct cdp_tx_mgmt_comp_info);
  1115. if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
1116. qdf_err("Failed to get headroom h_sz %zu h_avail %d\n",
  1117. head_size, qdf_nbuf_headroom(tag_buf));
  1118. qdf_assert_always(0);
  1119. return QDF_STATUS_E_NOMEM;
  1120. }
  1121. ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
  1122. qdf_nbuf_push_head(tag_buf, head_size);
  1123. qdf_assert_always(ptr_mgmt_comp_info);
  1124. ptr_mgmt_comp_info->ppdu_id = ppdu_id;
  1125. ptr_mgmt_comp_info->is_sgen_pkt = true;
  1126. ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
  1127. } else {
  1128. head_size = sizeof(ppdu_id);
  1129. nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
  1130. *nbuf_ptr = ppdu_id;
  1131. }
  1132. if (mon_pdev->bpr_enable) {
  1133. dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
  1134. tag_buf, HTT_INVALID_PEER,
  1135. WDI_NO_VAL, pdev->pdev_id);
  1136. }
  1137. dp_deliver_mgmt_frm(pdev, tag_buf);
  1138. return QDF_STATUS_E_ALREADY;
  1139. }
  1140. /*
  1141. * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
  1142. * bitmap for sniffer mode
  1143. * @bitmap: received bitmap
  1144. *
1145. * Return: the matching bitmap value, or zero if the received bitmap
1146. * matches neither the 64-bit nor the 256-bit Tx window tlv bitmap
  1147. */
  1148. int
  1149. dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
  1150. {
  1151. if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
  1152. return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
  1153. else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
  1154. return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
  1155. return 0;
  1156. }
  1157. /*
  1158. * dp_peer_find_by_id_valid - check if peer exists for given id
  1159. * @soc: core DP soc context
1160. * @peer_id: peer id by which the peer object can be retrieved
1161. *
1162. * Return: true if the peer exists, false otherwise
  1163. */
  1164. static
  1165. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
  1166. {
  1167. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
  1168. DP_MOD_ID_HTT);
  1169. if (peer) {
  1170. /*
  1171. * Decrement the peer ref which is taken as part of
  1172. * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
  1173. */
  1174. dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
  1175. return true;
  1176. }
  1177. return false;
  1178. }
  1179. /*
  1180. * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
  1181. * @peer: Datapath peer handle
  1182. * @ppdu: User PPDU Descriptor
  1183. * @cur_ppdu_id: PPDU_ID
  1184. *
  1185. * Return: None
  1186. *
1187. * For a Tx data frame, the delayed-BA flag may be set in
1188. * htt_ppdu_stats_user_common_tlv, meaning the Block Ack (BA) arrives
1189. * only after we send a Block Ack Request (BAR). The successful msdu
1190. * count is known only after the BA, and peer stats need that count,
1191. * so the Tx data stats are held in delayed_ba storage until then.
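*
* Flow sketch: a data PPDU with delayed_ba set -> dp_peer_copy_delay_stats()
* stashes the per-user rate info and sets last_delayed_ba; the subsequent
* BAR PPDU completion -> dp_peer_copy_stats_to_bar() restores it and clears
* the flag (see dp_process_ppdu_stats_sch_cmd_status_tlv below).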
  1192. */
  1193. static void
  1194. dp_peer_copy_delay_stats(struct dp_peer *peer,
  1195. struct cdp_tx_completion_ppdu_user *ppdu,
  1196. uint32_t cur_ppdu_id)
  1197. {
  1198. struct dp_pdev *pdev;
  1199. struct dp_vdev *vdev;
  1200. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  1201. if (mon_peer->last_delayed_ba) {
  1202. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1203. "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
  1204. mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
  1205. vdev = peer->vdev;
  1206. if (vdev) {
  1207. pdev = vdev->pdev;
  1208. pdev->stats.cdp_delayed_ba_not_recev++;
  1209. }
  1210. }
  1211. mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
  1212. mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
  1213. mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
  1214. mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
  1215. mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
  1216. mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
  1217. mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
  1218. mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
  1219. mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
  1221. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
  1222. ppdu->mpdu_tried_ucast;
  1223. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
  1224. ppdu->mpdu_tried_mcast;
  1225. mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
  1226. mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
  1228. mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
  1229. mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
  1230. mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
  1231. mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
  1232. mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
  1233. mon_peer->last_delayed_ba = true;
  1234. ppdu->debug_copied = true;
  1235. }
  1236. /*
  1237. * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
  1238. * @peer: Datapath peer handle
  1239. * @ppdu: PPDU Descriptor
  1240. *
  1241. * Return: None
  1242. *
1243. * For a Tx BAR, the PPDU stats TLV carries the Block Ack info. The PPDU
1244. * info from the BAR frame itself is not needed to populate peer stats,
1245. * but the successful MPDU/MSDU counts must be applied to the previously
1246. * transmitted Tx data frame, so the BAR's ppdu stats are overwritten
1247. * with the previously stored (delayed) ppdu stats.
  1248. */
  1249. static void
  1250. dp_peer_copy_stats_to_bar(struct dp_peer *peer,
  1251. struct cdp_tx_completion_ppdu_user *ppdu)
  1252. {
  1253. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  1254. ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
  1255. ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
  1256. ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
  1257. ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
  1258. ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
  1259. ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
  1260. ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
  1261. ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
  1262. ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
  1264. ppdu->mpdu_tried_ucast =
  1265. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
  1266. ppdu->mpdu_tried_mcast =
  1267. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
  1268. ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
  1269. ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
  1271. ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
  1272. ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
  1273. ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
  1274. ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
  1275. ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;
  1276. mon_peer->last_delayed_ba = false;
  1277. ppdu->debug_copied = true;
  1278. }
  1279. /*
  1280. * dp_tx_rate_stats_update() - Update rate per-peer statistics
  1281. * @peer: Datapath peer handle
  1282. * @ppdu: PPDU Descriptor
  1283. *
  1284. * Return: None
  1285. */
  1286. static void
  1287. dp_tx_rate_stats_update(struct dp_peer *peer,
  1288. struct cdp_tx_completion_ppdu_user *ppdu)
  1289. {
  1290. uint32_t ratekbps = 0;
  1291. uint64_t ppdu_tx_rate = 0;
  1292. uint32_t rix;
  1293. uint16_t ratecode = 0;
  1294. if (!peer || !ppdu)
  1295. return;
  1296. if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
  1297. return;
  1298. ratekbps = dp_getrateindex(ppdu->gi,
  1299. ppdu->mcs,
  1300. ppdu->nss,
  1301. ppdu->preamble,
  1302. ppdu->bw,
  1303. &rix,
  1304. &ratecode);
  1305. DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
  1306. if (!ratekbps)
  1307. return;
1308. /* Calculate goodput in the non-training period.
1309. * In the training period do nothing, as the pending
1310. * training packet count is reported as goodput instead.
  1311. */
  1312. if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
  1313. ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
  1314. (CDP_PERCENT_MACRO - ppdu->current_rate_per));
  1315. }
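/*
 * Worked example (assuming CDP_NUM_KB_IN_MB == 1000 and
 * CDP_PERCENT_MACRO == 100): ratekbps = 72200 with a current PER of
 * 10% gives sa_goodput = (72200 / 1000) * (100 - 10) = 6480, i.e.
 * 64.8 Mbps scaled by 100 (integer division truncates the kbps).
 */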
  1316. ppdu->rix = rix;
  1317. ppdu->tx_ratekbps = ratekbps;
  1318. ppdu->tx_ratecode = ratecode;
  1319. peer->stats.tx.avg_tx_rate =
  1320. dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
  1321. ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
  1322. DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
  1323. peer->stats.tx.bw_info = ppdu->bw;
  1324. peer->stats.tx.gi_info = ppdu->gi;
  1325. peer->stats.tx.nss_info = ppdu->nss;
  1326. peer->stats.tx.mcs_info = ppdu->mcs;
  1327. peer->stats.tx.preamble_info = ppdu->preamble;
  1328. if (peer->vdev) {
  1329. /*
  1330. * In STA mode:
  1331. * We get ucast stats as BSS peer stats.
  1332. *
  1333. * In AP mode:
  1334. * We get mcast stats as BSS peer stats.
  1335. * We get ucast stats as assoc peer stats.
  1336. */
  1337. if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
  1338. peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
  1339. peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
  1340. } else {
  1341. peer->vdev->stats.tx.last_tx_rate = ratekbps;
  1342. peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
  1343. }
  1344. }
  1345. }
  1346. /*
  1347. * dp_tx_stats_update() - Update per-peer statistics
  1348. * @pdev: Datapath pdev handle
  1349. * @peer: Datapath peer handle
  1350. * @ppdu: PPDU Descriptor
  1351. * @ack_rssi: RSSI of last ack received
  1352. *
  1353. * Return: None
  1354. */
  1355. static void
  1356. dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
  1357. struct cdp_tx_completion_ppdu_user *ppdu,
  1358. uint32_t ack_rssi)
  1359. {
  1360. uint8_t preamble, mcs;
  1361. uint16_t num_msdu;
  1362. uint16_t num_mpdu;
  1363. uint16_t mpdu_tried;
  1364. uint16_t mpdu_failed;
  1365. preamble = ppdu->preamble;
  1366. mcs = ppdu->mcs;
  1367. num_msdu = ppdu->num_msdu;
  1368. num_mpdu = ppdu->mpdu_success;
  1369. mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
  1370. mpdu_failed = mpdu_tried - num_mpdu;
  1371. /* If the peer statistics are already processed as part of
  1372. * per-MSDU completion handler, do not process these again in per-PPDU
  1373. * indications
  1374. */
  1375. if (pdev->soc->process_tx_status)
  1376. return;
  1377. if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
  1378. /*
1379. * All failed MPDUs will be retried, so increment the
1380. * retries count by mpdu_failed. Even for ack failure,
1381. * i.e. long retries, mpdu_failed equals mpdu_tried.
  1383. */
  1384. DP_STATS_INC(peer, tx.retries, mpdu_failed);
  1385. DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
  1386. return;
  1387. }
  1388. if (ppdu->is_ppdu_cookie_valid)
  1389. DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
  1390. if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
  1391. ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
  1392. if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
  1393. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1394. "mu_group_id out of bound!!\n");
  1395. else
  1396. DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
  1397. (ppdu->user_pos + 1));
  1398. }
  1399. if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
  1400. ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
  1401. DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
  1402. DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
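/*
 * Bucket per-RU-size counters: ru_tones carries the RU allocation
 * width (26..996 tones) reported by FW for this OFDMA user.
 */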
  1403. switch (ppdu->ru_tones) {
  1404. case RU_26:
  1405. DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
  1406. num_msdu);
  1407. DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
  1408. num_mpdu);
  1409. DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
  1410. mpdu_tried);
  1411. break;
  1412. case RU_52:
  1413. DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
  1414. num_msdu);
  1415. DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
  1416. num_mpdu);
  1417. DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
  1418. mpdu_tried);
  1419. break;
  1420. case RU_106:
  1421. DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
  1422. num_msdu);
  1423. DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
  1424. num_mpdu);
  1425. DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
  1426. mpdu_tried);
  1427. break;
  1428. case RU_242:
  1429. DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
  1430. num_msdu);
  1431. DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
  1432. num_mpdu);
  1433. DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
  1434. mpdu_tried);
  1435. break;
  1436. case RU_484:
  1437. DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
  1438. num_msdu);
  1439. DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
  1440. num_mpdu);
  1441. DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
  1442. mpdu_tried);
  1443. break;
  1444. case RU_996:
  1445. DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
  1446. num_msdu);
  1447. DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
  1448. num_mpdu);
  1449. DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
  1450. mpdu_tried);
  1451. break;
  1452. }
  1453. }
  1454. /*
1455. * As above: every failed MPDU is retried, so the retries
1456. * count increments by mpdu_failed (for long retries,
1457. * mpdu_failed equals mpdu_tried).
  1459. */
  1460. DP_STATS_INC(peer, tx.retries, mpdu_failed);
  1461. DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
  1462. DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
  1463. num_msdu);
  1464. DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
  1465. num_mpdu);
  1466. DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
  1467. mpdu_tried);
  1468. DP_STATS_INC_PKT(peer, tx.comp_pkt,
  1469. num_msdu, (ppdu->success_bytes +
  1470. ppdu->retry_bytes + ppdu->failed_bytes));
  1471. DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
  1472. DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
  1473. DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
  1474. DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
  1475. if (ppdu->tid < CDP_DATA_TID_MAX)
  1476. DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
  1477. num_msdu);
  1478. DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
  1479. DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
  1480. if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
  1481. DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
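/*
 * Per-preamble MCS histogram: an in-range MCS increments
 * mcs_count[mcs]; out-of-range values fold into the
 * MAX_MCS - 1 overflow bucket for that preamble.
 */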
  1482. DP_STATS_INCC(peer,
  1483. tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
  1484. ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
  1485. DP_STATS_INCC(peer,
  1486. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  1487. ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
  1488. DP_STATS_INCC(peer,
  1489. tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
  1490. ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
  1491. DP_STATS_INCC(peer,
  1492. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  1493. ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
  1494. DP_STATS_INCC(peer,
  1495. tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
  1496. ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
  1497. DP_STATS_INCC(peer,
  1498. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  1499. ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
  1500. DP_STATS_INCC(peer,
  1501. tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
  1502. ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
  1503. DP_STATS_INCC(peer,
  1504. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  1505. ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
  1506. DP_STATS_INCC(peer,
  1507. tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
  1508. ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
  1509. DP_STATS_INCC(peer,
  1510. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  1511. ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
  1512. DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
  1513. DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
  1514. DP_STATS_INCC(peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
  1515. dp_peer_stats_notify(pdev, peer);
  1516. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  1517. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
  1518. &peer->stats, ppdu->peer_id,
  1519. UPDATE_PEER_STATS, pdev->pdev_id);
  1520. #endif
  1521. }
  1522. /*
  1523. * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
  1524. * if a new peer id arrives in a PPDU
1525. * @pdev: DP pdev handle
1526. * @peer_id: peer unique identifier
  1527. * @ppdu_info: per ppdu tlv structure
  1528. *
  1529. * return:user index to be populated
  1530. */
  1531. static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
  1532. uint16_t peer_id,
  1533. struct ppdu_info *ppdu_info)
  1534. {
  1535. uint8_t user_index = 0;
  1536. struct cdp_tx_completion_ppdu *ppdu_desc;
  1537. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  1538. ppdu_desc =
  1539. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
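/*
 * Linear scan is sufficient here: a PPDU carries at most
 * max_users (8) users, so the lookup cost is small and bounded.
 */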
  1540. while ((user_index + 1) <= ppdu_info->last_user) {
  1541. ppdu_user_desc = &ppdu_desc->user[user_index];
  1542. if (ppdu_user_desc->peer_id != peer_id) {
  1543. user_index++;
  1544. continue;
  1545. } else {
  1546. /* Max users possible is 8 so user array index should
  1547. * not exceed 7
  1548. */
  1549. qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
  1550. return user_index;
  1551. }
  1552. }
  1553. ppdu_info->last_user++;
  1554. /* Max users possible is 8 so last user should not exceed 8 */
  1555. qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
  1556. return ppdu_info->last_user - 1;
  1557. }
  1558. /*
  1559. * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
1560. * @pdev: DP pdev handle
  1561. * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
  1562. * @ppdu_info: per ppdu tlv structure
  1563. *
  1564. * return:void
  1565. */
  1566. static void
  1567. dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
  1568. uint32_t *tag_buf,
  1569. struct ppdu_info *ppdu_info)
  1570. {
  1571. uint16_t frame_type;
  1572. uint16_t frame_ctrl;
  1573. uint16_t freq;
  1574. struct dp_soc *soc = NULL;
  1575. struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
  1576. uint64_t ppdu_start_timestamp;
  1577. uint32_t *start_tag_buf;
  1578. start_tag_buf = tag_buf;
  1579. ppdu_desc =
  1580. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  1581. ppdu_desc->ppdu_id = ppdu_info->ppdu_id;
  1582. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
  1583. ppdu_info->sched_cmdid =
  1584. HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
  1585. ppdu_desc->num_users =
  1586. HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
  1587. qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
  1588. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
  1589. frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
  1590. ppdu_desc->htt_frame_type = frame_type;
  1591. frame_ctrl = ppdu_desc->frame_ctrl;
  1592. ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
  1593. switch (frame_type) {
  1594. case HTT_STATS_FTYPE_TIDQ_DATA_SU:
  1595. case HTT_STATS_FTYPE_TIDQ_DATA_MU:
  1596. case HTT_STATS_FTYPE_SGEN_QOS_NULL:
  1597. /*
1598. * For management packets the frame type arrives as DATA_SU;
1599. * check frame_ctrl before deciding the frame_type.
  1600. */
  1601. if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
  1602. ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
  1603. else
  1604. ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
  1605. break;
  1606. case HTT_STATS_FTYPE_SGEN_MU_BAR:
  1607. case HTT_STATS_FTYPE_SGEN_BAR:
  1608. ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
  1609. break;
  1610. default:
  1611. ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
  1612. break;
  1613. }
  1614. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
  1615. ppdu_desc->tx_duration = *tag_buf;
  1616. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
  1617. ppdu_desc->ppdu_start_timestamp = *tag_buf;
  1618. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
  1619. freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
  1620. if (freq != ppdu_desc->channel) {
  1621. soc = pdev->soc;
  1622. ppdu_desc->channel = freq;
  1623. pdev->operating_channel.freq = freq;
  1624. if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
  1625. pdev->operating_channel.num =
  1626. soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
  1627. pdev->pdev_id,
  1628. freq);
  1629. if (soc && soc->cdp_soc.ol_ops->freq_to_band)
  1630. pdev->operating_channel.band =
  1631. soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
  1632. pdev->pdev_id,
  1633. freq);
  1634. }
  1635. ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
  1636. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
  1637. ppdu_desc->phy_ppdu_tx_time_us =
  1638. HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
  1639. ppdu_desc->beam_change =
  1640. HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
  1641. ppdu_desc->doppler =
  1642. HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
  1643. ppdu_desc->spatial_reuse =
  1644. HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);
  1645. dp_tx_capture_htt_frame_counter(pdev, frame_type);
  1646. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
  1647. ppdu_start_timestamp = *tag_buf;
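/*
 * Compose the 64-bit start timestamp: the lower 32 bits were taken
 * from START_TSTMP_L32_US above; shift and mask the upper word in.
 */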
  1648. ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
  1649. HTT_SHIFT_UPPER_TIMESTAMP) &
  1650. HTT_MASK_UPPER_TIMESTAMP);
1651. ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
1652. ppdu_desc->tx_duration;
1653. /* Ack time stamp is the same as the end time stamp */
1654. ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
1657. ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
1658. ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
1659. ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
  1662. tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
  1663. ppdu_desc->bss_color =
  1664. HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
  1665. }
  1666. /*
1667. * dp_process_ppdu_stats_user_common_tlv: Process htt_ppdu_stats_user_common_tlv
* @pdev: DP pdev handle
1668. * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
  1669. * @ppdu_info: per ppdu tlv structure
  1670. *
  1671. * return:void
  1672. */
  1673. static void dp_process_ppdu_stats_user_common_tlv(
  1674. struct dp_pdev *pdev, uint32_t *tag_buf,
  1675. struct ppdu_info *ppdu_info)
  1676. {
  1677. uint16_t peer_id;
  1678. struct cdp_tx_completion_ppdu *ppdu_desc;
  1679. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  1680. uint8_t curr_user_index = 0;
  1681. struct dp_peer *peer;
  1682. struct dp_vdev *vdev;
  1683. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  1684. ppdu_desc =
  1685. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  1686. tag_buf++;
  1687. peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
  1688. curr_user_index =
  1689. dp_get_ppdu_info_user_index(pdev,
  1690. peer_id, ppdu_info);
  1691. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  1692. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  1693. ppdu_desc->vdev_id =
  1694. HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
  1695. ppdu_user_desc->peer_id = peer_id;
  1696. tag_buf++;
  1697. if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
  1698. ppdu_user_desc->delayed_ba = 1;
  1699. ppdu_desc->delayed_ba = 1;
  1700. }
  1701. if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
  1702. ppdu_user_desc->is_mcast = true;
  1703. ppdu_user_desc->mpdu_tried_mcast =
  1704. HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
  1705. ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
  1706. } else {
  1707. ppdu_user_desc->mpdu_tried_ucast =
  1708. HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
  1709. }
  1710. ppdu_user_desc->is_seq_num_valid =
  1711. HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
  1712. tag_buf++;
  1713. ppdu_user_desc->qos_ctrl =
  1714. HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
  1715. ppdu_user_desc->frame_ctrl =
  1716. HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
  1717. ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
  1718. if (ppdu_user_desc->delayed_ba)
  1719. ppdu_user_desc->mpdu_success = 0;
  1720. tag_buf += 3;
  1721. if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
  1722. ppdu_user_desc->ppdu_cookie =
  1723. HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
  1724. ppdu_user_desc->is_ppdu_cookie_valid = 1;
  1725. }
1726. /* returning early here would leave the other fields unpopulated */
  1727. if (peer_id == DP_SCAN_PEER_ID) {
  1728. vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
  1729. DP_MOD_ID_TX_PPDU_STATS);
  1730. if (!vdev)
  1731. return;
  1732. qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
  1733. QDF_MAC_ADDR_SIZE);
  1734. dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
  1735. } else {
  1736. peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
  1737. DP_MOD_ID_TX_PPDU_STATS);
  1738. if (!peer) {
  1739. /*
1740. * FW can send a peer_id for a peer that has already been
1741. * removed on the host. E.g. on disassoc, FW sends ppdu
1742. * stats carrying the previously associated peer's peer_id
1743. * even though that peer no longer exists; fall back to
1744. * the vdev mac address in that case.
  1745. */
  1746. vdev = dp_vdev_get_ref_by_id(pdev->soc,
  1747. ppdu_desc->vdev_id,
  1748. DP_MOD_ID_TX_PPDU_STATS);
  1749. if (!vdev)
  1750. return;
  1751. qdf_mem_copy(ppdu_user_desc->mac_addr,
  1752. vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
  1753. dp_vdev_unref_delete(pdev->soc, vdev,
  1754. DP_MOD_ID_TX_PPDU_STATS);
  1755. return;
  1756. }
  1757. qdf_mem_copy(ppdu_user_desc->mac_addr,
  1758. peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
  1759. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  1760. }
  1761. }
  1762. /**
  1763. * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
  1764. * @pdev: DP pdev handle
  1765. * @tag_buf: T2H message buffer carrying the user rate TLV
  1766. * @ppdu_info: per ppdu tlv structure
  1767. *
  1768. * return:void
  1769. */
  1770. static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
  1771. uint32_t *tag_buf,
  1772. struct ppdu_info *ppdu_info)
  1773. {
  1774. uint16_t peer_id;
  1775. struct cdp_tx_completion_ppdu *ppdu_desc;
  1776. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  1777. uint8_t curr_user_index = 0;
  1778. struct dp_vdev *vdev;
  1779. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  1780. ppdu_desc =
  1781. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  1782. tag_buf++;
  1783. peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
  1784. curr_user_index =
  1785. dp_get_ppdu_info_user_index(pdev,
  1786. peer_id, ppdu_info);
  1787. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  1788. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
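/* For DP_SCAN_PEER_ID, only validate that the vdev still exists */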
  1789. if (peer_id == DP_SCAN_PEER_ID) {
  1790. vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
  1791. DP_MOD_ID_TX_PPDU_STATS);
  1792. if (!vdev)
  1793. return;
  1794. dp_vdev_unref_delete(pdev->soc, vdev,
  1795. DP_MOD_ID_TX_PPDU_STATS);
  1796. }
  1797. ppdu_user_desc->peer_id = peer_id;
  1798. ppdu_user_desc->tid =
  1799. HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
  1800. tag_buf += 1;
  1801. ppdu_user_desc->user_pos =
  1802. HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
  1803. ppdu_user_desc->mu_group_id =
  1804. HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
  1805. tag_buf += 1;
  1806. ppdu_user_desc->ru_start =
  1807. HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
  1808. ppdu_user_desc->ru_tones =
  1809. (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
  1810. HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
  1811. ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;
  1812. tag_buf += 2;
  1813. ppdu_user_desc->ppdu_type =
  1814. HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
  1815. tag_buf++;
  1816. ppdu_user_desc->tx_rate = *tag_buf;
  1817. ppdu_user_desc->ltf_size =
  1818. HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
  1819. ppdu_user_desc->stbc =
  1820. HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
  1821. ppdu_user_desc->he_re =
  1822. HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
  1823. ppdu_user_desc->txbf =
  1824. HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
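/*
 * The HTT user-rate TLV encodes bandwidth with 20 MHz starting at 2,
 * so subtract 2 to map onto the zero-based CDP bandwidth enum
 * (assumption drawn from the two enum layouts).
 */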
  1825. ppdu_user_desc->bw =
  1826. HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
  1827. ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
  1828. ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
  1829. ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
  1830. ppdu_user_desc->preamble =
  1831. HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
  1832. ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
  1833. ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
  1834. ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
  1835. }
  1836. /*
  1837. * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
  1838. * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
1839. * @pdev: DP PDEV handle
  1840. * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
  1841. * @ppdu_info: per ppdu tlv structure
  1842. *
  1843. * return:void
  1844. */
  1845. static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
  1846. struct dp_pdev *pdev, uint32_t *tag_buf,
  1847. struct ppdu_info *ppdu_info)
  1848. {
  1849. htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
  1850. (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
  1851. struct cdp_tx_completion_ppdu *ppdu_desc;
  1852. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  1853. uint8_t curr_user_index = 0;
  1854. uint16_t peer_id;
  1855. uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
  1856. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  1857. ppdu_desc =
  1858. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  1859. tag_buf++;
  1860. peer_id =
  1861. HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  1862. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  1863. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  1864. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  1865. ppdu_user_desc->peer_id = peer_id;
  1866. ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
  1867. qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
  1868. sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
  1869. dp_process_ppdu_stats_update_failed_bitmap(pdev,
  1870. (void *)ppdu_user_desc,
  1871. ppdu_info->ppdu_id,
  1872. size);
  1873. }
  1874. /*
  1875. * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
  1876. * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
1877. * @pdev: DP PDEV handle
  1878. * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
  1879. * @ppdu_info: per ppdu tlv structure
  1880. *
  1881. * return:void
  1882. */
  1883. static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
  1884. struct dp_pdev *pdev, uint32_t *tag_buf,
  1885. struct ppdu_info *ppdu_info)
  1886. {
  1887. htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
  1888. (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
  1889. struct cdp_tx_completion_ppdu *ppdu_desc;
  1890. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  1891. uint8_t curr_user_index = 0;
  1892. uint16_t peer_id;
  1893. uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
  1894. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  1895. ppdu_desc =
  1896. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  1897. tag_buf++;
  1898. peer_id =
  1899. HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  1900. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  1901. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  1902. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  1903. ppdu_user_desc->peer_id = peer_id;
  1904. ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
  1905. qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
  1906. sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
  1907. dp_process_ppdu_stats_update_failed_bitmap(pdev,
  1908. (void *)ppdu_user_desc,
  1909. ppdu_info->ppdu_id,
  1910. size);
  1911. }
  1912. /*
  1913. * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
  1914. * htt_ppdu_stats_user_cmpltn_common_tlv
1915. * @pdev: DP PDEV handle
  1916. * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
  1917. * @ppdu_info: per ppdu tlv structure
  1918. *
  1919. * return:void
  1920. */
  1921. static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
  1922. struct dp_pdev *pdev, uint32_t *tag_buf,
  1923. struct ppdu_info *ppdu_info)
  1924. {
  1925. uint16_t peer_id;
  1926. struct cdp_tx_completion_ppdu *ppdu_desc;
  1927. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  1928. uint8_t curr_user_index = 0;
  1929. uint8_t bw_iter;
  1930. htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
  1931. (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
  1932. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  1933. ppdu_desc =
  1934. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  1935. tag_buf++;
  1936. peer_id =
  1937. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
  1938. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  1939. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  1940. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  1941. ppdu_user_desc->peer_id = peer_id;
  1942. ppdu_user_desc->completion_status =
  1943. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
  1944. *tag_buf);
  1945. ppdu_user_desc->tid =
  1946. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
  1947. tag_buf++;
  1948. if (qdf_likely(ppdu_user_desc->completion_status ==
  1949. HTT_PPDU_STATS_USER_STATUS_OK)) {
  1950. ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
  1951. ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
  1952. ppdu_user_desc->ack_rssi_valid = 1;
  1953. } else {
  1954. ppdu_user_desc->ack_rssi_valid = 0;
  1955. }
  1956. tag_buf++;
  1957. ppdu_user_desc->mpdu_success =
  1958. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
  1959. ppdu_user_desc->mpdu_failed =
  1960. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
  1961. ppdu_user_desc->mpdu_success;
  1962. tag_buf++;
  1963. ppdu_user_desc->long_retries =
  1964. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
  1965. ppdu_user_desc->short_retries =
  1966. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
  1967. ppdu_user_desc->retry_msdus =
  1968. ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
  1969. ppdu_user_desc->is_ampdu =
  1970. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
  1971. ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
  1972. ppdu_desc->resp_type =
  1973. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
  1974. ppdu_desc->mprot_type =
  1975. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
  1976. ppdu_desc->rts_success =
  1977. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
  1978. ppdu_desc->rts_failure =
  1979. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
  1980. ppdu_user_desc->pream_punct =
  1981. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);
  1982. ppdu_info->compltn_common_tlv++;
  1983. /*
1984. * A MU BAR may solicit n users while only m of them ack. To count
1985. * how many users responded, a separate per-PPDU counter,
1986. * bar_num_users, is incremented for every
1987. * htt_ppdu_stats_user_cmpltn_common_tlv received.
  1988. */
  1989. ppdu_desc->bar_num_users++;
  1990. tag_buf++;
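/* One dword of per-chain RSSI per iteration, CDP_RSSI_CHAIN_LEN chains */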
  1991. for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
  1992. ppdu_user_desc->rssi_chain[bw_iter] =
  1993. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
  1994. tag_buf++;
  1995. }
  1996. ppdu_user_desc->sa_tx_antenna =
  1997. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
  1998. tag_buf++;
  1999. ppdu_user_desc->sa_is_training =
  2000. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
  2001. if (ppdu_user_desc->sa_is_training) {
  2002. ppdu_user_desc->sa_goodput =
  2003. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
  2004. }
  2005. tag_buf++;
  2006. for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
  2007. ppdu_user_desc->sa_max_rates[bw_iter] =
  2008. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
  2009. }
  2010. tag_buf += CDP_NUM_SA_BW;
  2011. ppdu_user_desc->current_rate_per =
  2012. HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
  2013. }
  2014. /*
  2015. * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
  2016. * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
2017. * @pdev: DP PDEV handle
  2018. * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
  2019. * @ppdu_info: per ppdu tlv structure
  2020. *
  2021. * return:void
  2022. */
  2023. static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
  2024. struct dp_pdev *pdev, uint32_t *tag_buf,
  2025. struct ppdu_info *ppdu_info)
  2026. {
  2027. htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
  2028. (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
  2029. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2030. struct cdp_tx_completion_ppdu *ppdu_desc;
  2031. uint8_t curr_user_index = 0;
  2032. uint16_t peer_id;
  2033. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2034. ppdu_desc =
  2035. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2036. tag_buf++;
  2037. peer_id =
  2038. HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  2039. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  2040. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2041. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2042. ppdu_user_desc->peer_id = peer_id;
  2043. ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
  2044. qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
  2045. sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
  2046. ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
  2047. }
  2048. /*
  2049. * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
  2050. * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
2051. * @pdev: DP PDEV handle
  2052. * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
  2053. * @ppdu_info: per ppdu tlv structure
  2054. *
  2055. * return:void
  2056. */
  2057. static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
  2058. struct dp_pdev *pdev, uint32_t *tag_buf,
  2059. struct ppdu_info *ppdu_info)
  2060. {
  2061. htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
  2062. (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
  2063. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2064. struct cdp_tx_completion_ppdu *ppdu_desc;
  2065. uint8_t curr_user_index = 0;
  2066. uint16_t peer_id;
  2067. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2068. ppdu_desc =
  2069. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2070. tag_buf++;
  2071. peer_id =
  2072. HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
  2073. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  2074. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2075. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2076. ppdu_user_desc->peer_id = peer_id;
  2077. ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
  2078. qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
  2079. sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
  2080. ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
  2081. }
  2082. /*
  2083. * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
  2084. * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
2085. * @pdev: DP PDEV handle
  2086. * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
  2087. * @ppdu_info: per ppdu tlv structure
  2088. *
  2089. * return:void
  2090. */
  2091. static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
  2092. struct dp_pdev *pdev, uint32_t *tag_buf,
  2093. struct ppdu_info *ppdu_info)
  2094. {
  2095. uint16_t peer_id;
  2096. struct cdp_tx_completion_ppdu *ppdu_desc;
  2097. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2098. uint8_t curr_user_index = 0;
  2099. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2100. ppdu_desc =
  2101. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2102. tag_buf += 2;
  2103. peer_id =
  2104. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
  2105. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  2106. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2107. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2108. if (!ppdu_user_desc->ack_ba_tlv) {
  2109. ppdu_user_desc->ack_ba_tlv = 1;
  2110. } else {
  2111. pdev->stats.ack_ba_comes_twice++;
  2112. return;
  2113. }
  2114. ppdu_user_desc->peer_id = peer_id;
  2115. tag_buf++;
2116. /* do not update ppdu_desc->tid from this TLV */
  2117. ppdu_user_desc->num_mpdu =
  2118. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
  2119. ppdu_user_desc->num_msdu =
  2120. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
  2121. ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
  2122. tag_buf++;
  2123. ppdu_user_desc->start_seq =
  2124. HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
  2125. *tag_buf);
  2126. tag_buf++;
  2127. ppdu_user_desc->success_bytes = *tag_buf;
  2128. /* increase ack ba tlv counter on successful mpdu */
  2129. if (ppdu_user_desc->num_mpdu)
  2130. ppdu_info->ack_ba_tlv++;
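/*
 * If no BA bitmap TLV was seen for this user (ba_size == 0),
 * synthesize a minimal one-entry bitmap from start_seq so the
 * later MPDU accounting still works.
 */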
  2131. if (ppdu_user_desc->ba_size == 0) {
  2132. ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
  2133. ppdu_user_desc->ba_bitmap[0] = 1;
  2134. ppdu_user_desc->ba_size = 1;
  2135. }
  2136. }
  2137. /*
  2138. * dp_process_ppdu_stats_user_common_array_tlv: Process
  2139. * htt_ppdu_stats_user_common_array_tlv
2140. * @pdev: DP PDEV handle
2141. * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv
  2142. * @ppdu_info: per ppdu tlv structure
  2143. *
  2144. * return:void
  2145. */
  2146. static void dp_process_ppdu_stats_user_common_array_tlv(
  2147. struct dp_pdev *pdev, uint32_t *tag_buf,
  2148. struct ppdu_info *ppdu_info)
  2149. {
  2150. uint32_t peer_id;
  2151. struct cdp_tx_completion_ppdu *ppdu_desc;
  2152. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2153. uint8_t curr_user_index = 0;
  2154. struct htt_tx_ppdu_stats_info *dp_stats_buf;
  2155. uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
  2156. ppdu_desc =
  2157. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2158. tag_buf++;
  2159. dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
  2160. tag_buf += 3;
  2161. peer_id =
  2162. HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
  2163. if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
  2164. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2165. "Invalid peer");
  2166. return;
  2167. }
  2168. curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
  2169. ppdu_user_desc = &ppdu_desc->user[curr_user_index];
  2170. ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
  2171. ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
  2172. ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
  2173. tag_buf++;
  2174. ppdu_user_desc->success_msdus =
  2175. HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2176. ppdu_user_desc->retry_msdus =
  2177. HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
  2178. tag_buf++;
  2179. ppdu_user_desc->failed_msdus =
  2180. HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
  2181. }
  2182. /*
2183. * dp_process_ppdu_stats_user_compltn_flush_tlv: Process
  2184. * htt_ppdu_stats_flush_tlv
  2185. * @pdev: DP PDEV handle
  2186. * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
  2187. * @ppdu_info: per ppdu tlv structure
  2188. *
  2189. * return:void
  2190. */
  2191. static void
  2192. dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
  2193. uint32_t *tag_buf,
  2194. struct ppdu_info *ppdu_info)
  2195. {
  2196. struct cdp_tx_completion_ppdu *ppdu_desc;
  2197. uint32_t peer_id;
  2198. uint8_t tid;
  2199. struct dp_peer *peer;
  2200. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  2201. ppdu_desc = (struct cdp_tx_completion_ppdu *)
  2202. qdf_nbuf_data(ppdu_info->nbuf);
  2203. ppdu_desc->is_flush = 1;
  2204. tag_buf++;
  2205. ppdu_desc->drop_reason = *tag_buf;
  2206. tag_buf++;
  2207. ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
  2208. ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
  2209. ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
  2210. tag_buf++;
  2211. peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
  2212. tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
  2213. ppdu_desc->num_users = 1;
  2214. ppdu_desc->user[0].peer_id = peer_id;
  2215. ppdu_desc->user[0].tid = tid;
  2216. ppdu_desc->queue_type =
  2217. HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
  2218. peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
  2219. DP_MOD_ID_TX_PPDU_STATS);
  2220. if (!peer)
  2221. goto add_ppdu_to_sched_list;
  2222. if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
  2223. DP_STATS_INC(peer,
  2224. tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
  2225. ppdu_desc->num_msdu);
  2226. }
  2227. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  2228. add_ppdu_to_sched_list:
  2229. ppdu_info->done = 1;
  2230. TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
  2231. mon_pdev->list_depth--;
  2232. TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
  2233. ppdu_info_list_elem);
  2234. mon_pdev->sched_comp_list_depth++;
  2235. }
  2236. /**
  2237. * dp_process_ppdu_stats_sch_cmd_status_tlv: Process schedule command status tlv
2238. * The TLV buffer itself is not parsed here; the PPDU is finalized
2238. * and moved to the scheduler-completion list.
  2239. * @pdev: DP PDEV handle
  2240. * @ppdu_info: per ppdu tlv structure
  2241. *
  2242. * return:void
  2243. */
  2244. static void
  2245. dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
  2246. struct ppdu_info *ppdu_info)
  2247. {
  2248. struct cdp_tx_completion_ppdu *ppdu_desc;
  2249. struct dp_peer *peer;
  2250. uint8_t num_users;
  2251. uint8_t i;
  2252. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  2253. ppdu_desc = (struct cdp_tx_completion_ppdu *)
  2254. qdf_nbuf_data(ppdu_info->nbuf);
  2255. num_users = ppdu_desc->bar_num_users;
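/*
 * Scan for the user at position 0: a BAR PPDU inherits its phy mode
 * and mcs from that user, while a CTRL PPDU inherits its frame_ctrl.
 */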
  2256. for (i = 0; i < num_users; i++) {
  2257. if (ppdu_desc->user[i].user_pos == 0) {
  2258. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
  2259. /* update phy mode for bar frame */
  2260. ppdu_desc->phy_mode =
  2261. ppdu_desc->user[i].preamble;
  2262. ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
  2263. break;
  2264. }
  2265. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
  2266. ppdu_desc->frame_ctrl =
  2267. ppdu_desc->user[i].frame_ctrl;
  2268. break;
  2269. }
  2270. }
  2271. }
  2272. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
  2273. ppdu_desc->delayed_ba) {
  2274. qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
  2275. for (i = 0; i < ppdu_desc->num_users; i++) {
  2276. struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
  2277. uint64_t start_tsf;
  2278. uint64_t end_tsf;
  2279. uint32_t ppdu_id;
  2280. struct dp_mon_peer *mon_peer;
  2281. ppdu_id = ppdu_desc->ppdu_id;
  2282. peer = dp_peer_get_ref_by_id
  2283. (pdev->soc, ppdu_desc->user[i].peer_id,
  2284. DP_MOD_ID_TX_PPDU_STATS);
  2285. /**
  2286. * This check is to make sure peer is not deleted
  2287. * after processing the TLVs.
  2288. */
  2289. if (!peer)
  2290. continue;
  2291. mon_peer = peer->monitor_peer;
  2292. delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
  2293. start_tsf = ppdu_desc->ppdu_start_timestamp;
  2294. end_tsf = ppdu_desc->ppdu_end_timestamp;
  2295. /**
  2296. * save delayed ba user info
  2297. */
  2298. if (ppdu_desc->user[i].delayed_ba) {
  2299. dp_peer_copy_delay_stats(peer,
  2300. &ppdu_desc->user[i],
  2301. ppdu_id);
  2302. mon_peer->last_delayed_ba_ppduid = ppdu_id;
  2303. delay_ppdu->ppdu_start_timestamp = start_tsf;
  2304. delay_ppdu->ppdu_end_timestamp = end_tsf;
  2305. }
  2306. ppdu_desc->user[i].peer_last_delayed_ba =
  2307. mon_peer->last_delayed_ba;
  2308. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  2309. if (ppdu_desc->user[i].delayed_ba &&
  2310. !ppdu_desc->user[i].debug_copied) {
  2311. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2312. QDF_TRACE_LEVEL_INFO_MED,
  2313. "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
  2314. __func__, __LINE__,
  2315. ppdu_desc->ppdu_id,
  2316. ppdu_desc->bar_ppdu_id,
  2317. ppdu_desc->num_users,
  2318. i,
  2319. ppdu_desc->htt_frame_type);
  2320. }
  2321. }
  2322. }
  2323. /*
2324. * When the frame type is BAR and the STATS_COMMON_TLV is set,
2325. * copy the stored peer delayed-BA info into the BAR status.
  2326. */
  2327. if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
  2328. for (i = 0; i < ppdu_desc->bar_num_users; i++) {
  2329. struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
  2330. uint64_t start_tsf;
  2331. uint64_t end_tsf;
  2332. struct dp_mon_peer *mon_peer;
  2333. peer = dp_peer_get_ref_by_id
  2334. (pdev->soc,
  2335. ppdu_desc->user[i].peer_id,
  2336. DP_MOD_ID_TX_PPDU_STATS);
  2337. /**
  2338. * This check is to make sure peer is not deleted
  2339. * after processing the TLVs.
  2340. */
  2341. if (!peer)
  2342. continue;
  2343. mon_peer = peer->monitor_peer;
  2344. if (ppdu_desc->user[i].completion_status !=
  2345. HTT_PPDU_STATS_USER_STATUS_OK) {
  2346. dp_peer_unref_delete(peer,
  2347. DP_MOD_ID_TX_PPDU_STATS);
  2348. continue;
  2349. }
  2350. delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
  2351. start_tsf = delay_ppdu->ppdu_start_timestamp;
  2352. end_tsf = delay_ppdu->ppdu_end_timestamp;
  2353. if (mon_peer->last_delayed_ba) {
  2354. dp_peer_copy_stats_to_bar(peer,
  2355. &ppdu_desc->user[i]);
  2356. ppdu_desc->ppdu_id =
  2357. mon_peer->last_delayed_ba_ppduid;
  2358. ppdu_desc->ppdu_start_timestamp = start_tsf;
  2359. ppdu_desc->ppdu_end_timestamp = end_tsf;
  2360. }
  2361. ppdu_desc->user[i].peer_last_delayed_ba =
  2362. mon_peer->last_delayed_ba;
  2363. dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
  2364. }
  2365. }
  2366. TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
  2367. mon_pdev->list_depth--;
  2368. TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
  2369. ppdu_info_list_elem);
  2370. mon_pdev->sched_comp_list_depth++;
  2371. }
/**
 * dp_validate_fix_ppdu_tlv() - Function to validate and fix the length of a
 * PPDU TLV
 *
 * If the TLV length sent as part of a PPDU TLV is less than the expected
 * size, i.e. the size of the corresponding data structure, pad the remaining
 * bytes with zeros and continue processing the TLVs.
 *
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_expected_size: Expected size of Tag
 * @tlv_len: TLV length received from FW
 *
 * Return: Pointer to updated TLV
 */
static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
						 uint32_t *tag_buf,
						 uint16_t tlv_expected_size,
						 uint16_t tlv_len)
{
	uint32_t *tlv_desc = tag_buf;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	qdf_assert_always(tlv_len != 0);

	if (tlv_len < tlv_expected_size) {
		qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
		qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
		tlv_desc = mon_pdev->ppdu_tlv_buf;
	}

	return tlv_desc;
}
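
/*
 * Illustrative example (sizes made up, not taken from the headers): if the
 * FW sends a 16-byte COMMON TLV while sizeof(htt_ppdu_stats_common_tlv) is
 * 24 bytes, dp_validate_fix_ppdu_tlv() returns mon_pdev->ppdu_tlv_buf
 * holding the 16 received bytes followed by 8 zero bytes, so the TLV
 * handler can safely read every field of the host structure and sees
 * zeros for the fields the FW did not send.
 */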
/**
 * dp_process_ppdu_tag() - Function to process the PPDU TLVs
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev,
				uint32_t *tag_buf,
				uint32_t tlv_len,
				struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint16_t tlv_expected_size;
	uint32_t *tlv_desc;
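
	/*
	 * Note (added for clarity): every case below follows the same
	 * pattern - pad a short TLV up to the host structure size via
	 * dp_validate_fix_ppdu_tlv(), then hand the (possibly padded)
	 * buffer to the per-TLV handler.
	 */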
	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
						      ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
						    ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
							     ppdu_info);
		break;
	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
		break;
	default:
		break;
	}
}
#ifdef WLAN_ATF_ENABLE
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
	uint32_t nss_ru_width_sum = 0;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !ppdu_desc || !user)
		return;

	/* Dereference pdev only after the NULL check above */
	mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->dp_atf_stats_enable)
		return;

	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
		return;

	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
	if (!nss_ru_width_sum)
		nss_ru_width_sum = 1;

	/*
	 * For an SU PPDU the phy Tx time is the whole PPDU duration, since
	 * there is a single user. For MU-MIMO/OFDMA the phy Tx time is
	 * calculated per user as below:
	 *   user phy tx time = Entire PPDU duration * MU Ratio * OFDMA Ratio
	 *   MU Ratio     = usr_nss / Sum_of_nss_of_all_users
	 *   OFDMA Ratio  = usr_ru_width / Sum_of_ru_width_of_all_users
	 *   usr_ru_width = ru_end - ru_start + 1
	 */
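	/*
	 * Worked example (illustrative numbers only): a 600 us MU PPDU with
	 * two users, nss 2 + 1 and 106 RU tones each, gives
	 * usr_nss_sum = 3, usr_ru_tones_sum = 212 and
	 * nss_ru_width_sum = 3 * 212 = 636. The nss-2 user is charged
	 * 600 * 2 * 106 / 636 = 200 us and the nss-1 user
	 * 600 * 1 * 106 / 636 = 100 us of phy tx time.
	 */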
	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
	} else {
		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
				user->nss * user->ru_tones) / nss_ru_width_sum;
	}
}
#else
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
}
#endif
/**
 * dp_ppdu_desc_user_stats_update() - Function to update TX user stats
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Return: void
 */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	uint32_t tlv_bitmap_expected;
	uint32_t tlv_bitmap_default;
	uint16_t i;
	uint32_t num_users;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
	    mon_pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	tlv_bitmap_default = tlv_bitmap_expected;

	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		num_users = ppdu_desc->bar_num_users;
		ppdu_desc->num_users = ppdu_desc->bar_num_users;
	} else {
		num_users = ppdu_desc->num_users;
	}
	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

	for (i = 0; i < num_users; i++) {
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		peer = dp_peer_get_ref_by_id(pdev->soc,
					     ppdu_desc->user[i].peer_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		/*
		 * This check is to make sure peer is not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
		/*
		 * Different frame types (DATA, BAR or CTRL) have different
		 * expected tlv bitmaps. Apart from the ACK_BA_STATUS TLV, we
		 * receive the other tlvs in order from the FW. Since the
		 * ACK_BA_STATUS TLV comes from hardware it is asynchronous,
		 * so we need to depend on some tlv to confirm that all tlvs
		 * have been received for a ppdu. Hence we depend on both the
		 * SCHED_CMD_STATUS_TLV and the ACK_BA_STATUS_TLV; for failed
		 * packets we won't get the ACK_BA_STATUS_TLV.
		 */
		if (!(ppdu_info->tlv_bitmap &
		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
		    (!(ppdu_info->tlv_bitmap &
		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
		     (ppdu_desc->user[i].completion_status ==
		      HTT_PPDU_STATS_USER_STATUS_OK))) {
			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
			continue;
		}

		/*
		 * Update tx stats for data frames having Qos as well as
		 * non-Qos data tid
		 */
		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
		     (ppdu_desc->htt_frame_type ==
		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
		      (ppdu_desc->num_mpdu > 1))) &&
		    (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
			dp_tx_stats_update(pdev, peer,
					   &ppdu_desc->user[i],
					   ppdu_desc->ack_rssi);
			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
		}

		dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc,
						     &ppdu_desc->user[i]);
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
		tlv_bitmap_expected = tlv_bitmap_default;
	}
}
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_ppdu_desc_deliver() - Function to deliver Tx PPDU status descriptor
 * to upper layer
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Return: void
 */
static
void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
			  struct ppdu_info *ppdu_info)
{
	struct ppdu_info *s_ppdu_info = NULL;
	struct ppdu_info *ppdu_info_next = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	qdf_nbuf_t nbuf;
	uint32_t time_delta = 0;
	bool starved = 0;
	bool matched = 0;
	bool recv_ack_ba_done = 0;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (ppdu_info->tlv_bitmap &
	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
	    ppdu_info->done)
		recv_ack_ba_done = 1;

	mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;

	s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);

	TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
					ppdu_info->tsf_l32;
		else
			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;
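		/*
		 * Note (added for clarity): tsf_l32 is the low 32 bits of
		 * the TSF, so a queued entry with a larger tsf_l32 than the
		 * current one means the counter wrapped in between; e.g.
		 * s_ppdu_info->tsf_l32 = 0xFFFFFF00 and
		 * ppdu_info->tsf_l32 = 0x200 yields
		 * time_delta = (MAX_TSF_32 - 0xFFFFFF00) + 0x200.
		 */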
		if (!s_ppdu_info->done && !recv_ack_ba_done) {
			if (time_delta < MAX_SCHED_STARVE) {
				dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
					    pdev->pdev_id,
					    s_ppdu_info->ppdu_id,
					    s_ppdu_info->sched_cmdid,
					    s_ppdu_info->tlv_bitmap,
					    s_ppdu_info->tsf_l32,
					    s_ppdu_info->done);
				break;
			}
			starved = 1;
		}

		mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
			     ppdu_info_list_elem);
		mon_pdev->sched_comp_list_depth--;

		nbuf = s_ppdu_info->nbuf;
		qdf_assert_always(nbuf);
		ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(nbuf);
		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;

		if (starved) {
			dp_mon_err("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
				   ppdu_desc->frame_ctrl,
				   ppdu_desc->htt_frame_type,
				   ppdu_desc->tlv_bitmap,
				   ppdu_desc->user[0].completion_status);
			starved = 0;
		}

		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
			matched = 1;

		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);

		qdf_mem_free(s_ppdu_info);

		/*
		 * Deliver PPDU stats only for valid (acked) data
		 * frames if sniffer mode is not enabled.
		 * If sniffer mode is enabled, PPDU stats
		 * for all frames including mgmt/control
		 * frames should be delivered to upper layer.
		 */
		if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
			dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
					     pdev->soc,
					     nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL,
					     pdev->pdev_id);
		} else {
			if ((ppdu_desc->num_mpdu != 0 ||
			     ppdu_desc->delayed_ba) &&
			    ppdu_desc->num_users != 0 &&
			    ((ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) ||
			     ((ppdu_desc->htt_frame_type ==
			       HTT_STATS_FTYPE_SGEN_MU_BAR) ||
			      (ppdu_desc->htt_frame_type ==
			       HTT_STATS_FTYPE_SGEN_BAR)))) {
				dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
						     pdev->soc,
						     nbuf, HTT_INVALID_PEER,
						     WDI_NO_VAL,
						     pdev->pdev_id);
			} else {
				qdf_nbuf_free(nbuf);
			}
		}

		if (matched)
			break;
	}
}
#endif
/**
 * dp_get_ppdu_desc() - Function to allocate a new PPDU status
 * descriptor for a new ppdu id
 * @pdev: DP pdev handle
 * @ppdu_id: PPDU unique identifier
 * @tlv_type: TLV type received
 * @tsf_l32: timestamp received along with ppdu stats indication header
 * @max_users: Maximum user for that particular ppdu
 *
 * Return: ppdu_info per ppdu tlv structure
 */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
				   uint8_t tlv_type, uint32_t tsf_l32,
				   uint8_t max_users)
{
	struct ppdu_info *ppdu_info = NULL;
	struct ppdu_info *s_ppdu_info = NULL;
	struct ppdu_info *ppdu_info_next = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	uint32_t size = 0;
	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
	struct cdp_tx_completion_ppdu_user *tmp_user;
	uint32_t time_delta;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/*
	 * Check whether a node for this ppdu_id already exists
	 */
	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			if (ppdu_info->tsf_l32 > tsf_l32)
				time_delta = (MAX_TSF_32 -
					      ppdu_info->tsf_l32) + tsf_l32;
			else
				time_delta = tsf_l32 - ppdu_info->tsf_l32;

			if (time_delta > WRAP_DROP_TSF_DELTA) {
				TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
					     ppdu_info, ppdu_info_list_elem);
				mon_pdev->list_depth--;
				pdev->stats.ppdu_wrap_drop++;
				tmp_ppdu_desc =
					(struct cdp_tx_completion_ppdu *)
						qdf_nbuf_data(ppdu_info->nbuf);
				tmp_user = &tmp_ppdu_desc->user[0];
				dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
						     ppdu_info->ppdu_id,
						     ppdu_info->tsf_l32,
						     ppdu_info->tlv_bitmap,
						     tmp_user->completion_status,
						     ppdu_info->compltn_common_tlv,
						     ppdu_info->ack_ba_tlv,
						     ppdu_id, tsf_l32,
						     tlv_type);
				qdf_nbuf_free(ppdu_info->nbuf);
				ppdu_info->nbuf = NULL;
				qdf_mem_free(ppdu_info);
			} else {
				break;
			}
		}
	}

	/*
	 * If it is an ack ba status tlv and the node is not in the ppdu
	 * info list, look for it in the sched completion ppdu list.
	 */
	if (!ppdu_info &&
	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
		TAILQ_FOREACH(s_ppdu_info,
			      &mon_pdev->sched_comp_ppdu_list,
			      ppdu_info_list_elem) {
			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
				if (s_ppdu_info->tsf_l32 > tsf_l32)
					time_delta = (MAX_TSF_32 -
						      s_ppdu_info->tsf_l32) +
							tsf_l32;
				else
					time_delta = tsf_l32 -
						s_ppdu_info->tsf_l32;
				if (time_delta < WRAP_DROP_TSF_DELTA) {
					ppdu_info = s_ppdu_info;
					break;
				}
			} else {
				/*
				 * ACK BA STATUS TLVs arrive in sequential
				 * order: if we receive the ack ba status
				 * tlv for the second ppdu while the first
				 * ppdu is still waiting for its own
				 * ACK BA STATUS TLV, then per the FW
				 * behaviour that tlv will not arrive later,
				 * so the first ppdu info can be marked done.
				 */
				if (s_ppdu_info)
					s_ppdu_info->done = 1;
			}
		}
	}
	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/*
			 * If we get a tlv_type that has already been
			 * processed for this ppdu, that means we got a new
			 * ppdu with the same ppdu id, so flush the older
			 * ppdu. For MU-MIMO and OFDMA a PPDU carries
			 * multiple users with the same tlv types; the tlv
			 * bitmap is used to check whether it is SU or
			 * MU-MIMO/OFDMA.
			 */
			if (!(ppdu_info->tlv_bitmap &
			      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;

			ppdu_desc = (struct cdp_tx_completion_ppdu *)
					qdf_nbuf_data(ppdu_info->nbuf);

			/*
			 * Apart from the ACK BA STATUS TLV, the rest arrive
			 * in order, so if the tlv type is not the
			 * ACK BA STATUS TLV we can deliver the ppdu_info.
			 */
			if ((tlv_type ==
			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
			    (ppdu_desc->htt_frame_type ==
			     HTT_STATS_FTYPE_SGEN_MU_BAR ||
			     ppdu_desc->htt_frame_type ==
			     HTT_STATS_FTYPE_SGEN_BAR))
				return ppdu_info;

			dp_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			return ppdu_info;
		}
	}

	/*
	 * Flush the head ppdu descriptor if the ppdu desc list reaches
	 * the max threshold.
	 */
	if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		pdev->stats.ppdu_drop++;
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		qdf_mem_free(ppdu_info);
	}
	size = sizeof(struct cdp_tx_completion_ppdu) +
		(max_users * sizeof(struct cdp_tx_completion_ppdu_user));
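
	/*
	 * Note (added for clarity): the descriptor is laid out as a
	 * cdp_tx_completion_ppdu header immediately followed by max_users
	 * per-user entries, which is why user[i] is only ever indexed
	 * after asserting num_users <= max_users elsewhere in this file.
	 */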
	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
					 0, 4, TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	ppdu_info->ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);

	if (qdf_nbuf_put_tail(ppdu_info->nbuf, size) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	ppdu_info->ppdu_desc->max_users = max_users;
	ppdu_info->tsf_l32 = tsf_l32;
	/*
	 * No lock is needed because all PPDU TLVs are processed in the
	 * same context and this list is updated in that same context.
	 */
	TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
			  ppdu_info_list_elem);
	mon_pdev->list_depth++;

	return ppdu_info;
}
/**
 * dp_htt_process_tlv() - Function to process each PPDU TLV
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Return: ppdu_info per ppdu tlv structure
 */
static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	uint8_t max_users = CDP_MU_MAX_USERS;
	uint32_t tsf_l32;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);

	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);

	msg_word = msg_word + 1;
	tsf_l32 = (uint32_t)(*msg_word);

	msg_word = msg_word + 2;
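
	/*
	 * Note (added for clarity): the indication header is four words -
	 * word 0 carries the payload size (and the pdev id, extracted by
	 * the caller), word 1 the ppdu id, word 2 the lower 32 bits of the
	 * TSF; word 3 is not consumed here, so the TLVs start at word 4.
	 */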
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;

		/*
		 * Not allocating a separate ppdu descriptor for the MGMT
		 * Payload TLV, as it is sent as a separate WDI indication
		 * and doesn't contain any ppdu information.
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
				(*(msg_word + 1));
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		/*
		 * Retrieve max_users from the USERS_INFO TLV; it is 1 for
		 * a COMPLTN_FLUSH TLV, and CDP_MU_MAX_USERS otherwise.
		 */
		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
			max_users =
				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
			max_users = 1;
		}

		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
					     tsf_l32, max_users);
		if (!ppdu_info)
			return NULL;

		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/*
		 * Increment the pdev level tlv count to monitor
		 * missing TLVs.
		 */
		mon_pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	if (!ppdu_info)
		return NULL;

	mon_pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
	    mon_pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	ppdu_desc = ppdu_info->ppdu_desc;
	if (!ppdu_desc)
		return NULL;
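
	/*
	 * Note (added for clarity): the mask below keeps only the low eight
	 * TLV types in the expected bitmap. For a failed user the FW does
	 * not guarantee the completion/BA TLVs that occupy the higher bits.
	 */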
	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
	    HTT_PPDU_STATS_USER_STATUS_OK) {
		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
	}

	/*
	 * For frame types DATA and BAR we update stats based on MSDUs.
	 * Successful msdu and mpdu counts are populated from the
	 * ACK BA STATUS TLV, which arrives out of order; the successful
	 * mpdu count is also populated from the COMPLTN COMMON TLV, which
	 * arrives in order. For every ppdu_info we store the successful
	 * mpdu count from both tlvs and compare them before delivering, to
	 * make sure the ACK BA STATUS TLV was received. For some
	 * self-generated frames we won't get an ack ba status tlv, so
	 * there is no need to wait for it.
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * Most of the time a bar frame will have a duplicate
		 * ack ba status tlv.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
			return NULL;

		/*
		 * For data frames the compltn common tlv count should match
		 * the ack ba status tlv count and the completion status.
		 * Only the first user is checked because for OFDMA the
		 * completion is seen at the next MU BAR frame, while for
		 * MIMO only the first user's completion is immediate.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
		    (ppdu_desc->user[0].completion_status == 0 &&
		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
			return NULL;
	}

	/*
	 * Once all the TLVs for a given PPDU have been processed, return
	 * the PPDU status to be delivered to the higher layer.
	 * tlv_bitmap_expected is not available for every frame type, but
	 * the SCHED CMD STATUS TLV is the last TLV from the FW for a ppdu
	 * and, apart from the ACK BA TLV, the FW sends the other TLVs in
	 * sequential order; the flush tlv comes separately.
	 */
	if ((ppdu_info->tlv_bitmap != 0 &&
	     (ppdu_info->tlv_bitmap &
	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
	    (ppdu_info->tlv_bitmap &
	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
		ppdu_info->done = 1;
		return ppdu_info;
	}

	return NULL;
}
#else
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
static void dp_htt_process_smu_ppdu_stats_tlv(struct dp_soc *soc,
					      qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_expected_size;
	uint8_t *tlv_buf;
	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);

	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 4;

	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);

		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;

		if (tlv_type == HTT_PPDU_STATS_FOR_SMU_TLV) {
			tlv_expected_size = sizeof(htt_ppdu_stats_for_smu_tlv);

			if (tlv_length >= tlv_expected_size)
				dp_wdi_event_handler(
					WDI_EVENT_PKT_CAPTURE_PPDU_STATS,
					soc, msg_word, HTT_INVALID_VDEV,
					WDI_NO_VAL, 0);
		}
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}
}
#endif
/**
 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
 * @soc: DP SOC handle
 * @pdev_id: pdev id
 * @htt_t2h_msg: HTT message nbuf
 *
 * Return: true if the buffer should be freed by the caller
 */
#if defined(WDI_EVENT_ENABLE)
#ifdef QCA_ENHANCED_STATS_SUPPORT
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev;
	struct ppdu_info *ppdu_info = NULL;
	bool free_buf = true;
	struct dp_mon_pdev *mon_pdev;

	if (pdev_id >= MAX_PDEV_CNT)
		return true;

	pdev = soc->pdev_list[pdev_id];
	if (!pdev)
		return true;

	/* Dereference pdev only after the bounds and NULL checks above */
	mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->enhanced_stats_en && !mon_pdev->tx_sniffer_enable &&
	    !mon_pdev->mcopy_mode && !mon_pdev->bpr_enable)
		return free_buf;

	qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);

	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);

	if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
		    (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
		    QDF_STATUS_SUCCESS)
			free_buf = false;
	}

	if (ppdu_info)
		dp_ppdu_desc_deliver(pdev, ppdu_info);

	mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
	mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
	mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;

	qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);

	return free_buf;
}
#elif defined(WLAN_FEATURE_PKT_CAPTURE_V2)
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_htt_process_smu_ppdu_stats_tlv(soc, htt_t2h_msg);

	return true;
}
#else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#endif
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/*
 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 * @htt_t2h_msg: HTT msg nbuf
 *
 * Return: True if buffer should be freed by caller.
 */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
			  uint32_t *msg_word,
			  qdf_nbuf_t htt_t2h_msg)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;
	bool free_buf;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
					      htt_t2h_msg);

	return free_buf;
}
#endif
/*
 * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *         QDF_STATUS_E_NOMEM: Error
 */
static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
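
	/*
	 * Note (added for clarity): this scratch buffer is sized for the
	 * largest HTT message and is what dp_validate_fix_ppdu_tlv() uses
	 * to zero-pad TLVs that arrive shorter than the host structure.
	 */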
	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
	if (!mon_pdev->ppdu_tlv_buf) {
		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}
/*
 * dp_htt_ppdu_stats_detach() - detach stats resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
	struct ppdu_info *ppdu_info, *ppdu_info_next;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->sched_comp_list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	if (mon_pdev->ppdu_tlv_buf)
		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
}
static void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	uint32_t *stat_ring_ppdu_ids;
	uint32_t *dest_ring_ppdu_ids;
	int i, idx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	rx_mon_stats = &mon_pdev->rx_mon_stats;

	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");

	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
		       rx_mon_stats->status_ppdu_compl);
	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
		       rx_mon_stats->status_ppdu_start);
	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
		       rx_mon_stats->status_ppdu_end);
	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_start_mis);
	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_end_mis);
	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
		       rx_mon_stats->status_ppdu_done);
	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
		       rx_mon_stats->dest_ppdu_done);
	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
		       rx_mon_stats->dest_mpdu_done);
	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
		       rx_mon_stats->tlv_tag_status_err);
	DP_PRINT_STATS("mon status DMA not done WAR count = %u",
		       rx_mon_stats->status_buf_done_war);
	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
		       rx_mon_stats->dest_mpdu_drop);
	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
		       rx_mon_stats->dup_mon_linkdesc_cnt);
	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
		       rx_mon_stats->dup_mon_buf_cnt);
	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
		       rx_mon_stats->mon_rx_bufs_reaped_dest);
	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
		       rx_mon_stats->mon_rx_bufs_replenished_dest);
	DP_PRINT_STATS("ppdu_id_mismatch = %u",
		       rx_mon_stats->ppdu_id_mismatch);
	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
		       rx_mon_stats->ppdu_id_match);
	DP_PRINT_STATS("ppdus dropped frm status ring = %d",
		       rx_mon_stats->status_ppdu_drop);
	DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
		       rx_mon_stats->dest_ppdu_drop);
	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
		       rx_mon_stats->mon_rx_dest_stuck);

	stat_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	dest_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);

	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) {
		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");
		/* Do not touch the history buffers if allocation failed */
		qdf_mem_free(stat_ring_ppdu_ids);
		qdf_mem_free(dest_ring_ppdu_ids);
		return;
	}

	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	idx = rx_mon_stats->ppdu_id_hist_idx;
	qdf_mem_copy(stat_ring_ppdu_ids,
		     rx_mon_stats->stat_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_mem_copy(dest_ring_ppdu_ids,
		     rx_mon_stats->dest_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	DP_PRINT_STATS("PPDU Id history:");
	DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
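	/*
	 * Note (added for clarity): the history is a circular buffer whose
	 * oldest entry follows ppdu_id_hist_idx; the index mask below
	 * assumes MAX_PPDU_ID_HIST is a power of two. The snapshots taken
	 * under mon_lock above are printed so the ring cannot change
	 * mid-print.
	 */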
	for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
		idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
		DP_PRINT_STATS("%*u\t%*u", 16,
			       stat_ring_ppdu_ids[idx], 16,
			       dest_ring_ppdu_ids[idx]);
	}
	qdf_mem_free(stat_ring_ppdu_ids);
	qdf_mem_free(dest_ring_ppdu_ids);
}
/*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
 */
#ifdef QCA_SUPPORT_BPR
static QDF_STATUS
dp_set_bpr_enable(struct dp_pdev *pdev, int val)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	switch (val) {
	case CDP_BPR_DISABLE:
		mon_pdev->bpr_enable = CDP_BPR_DISABLE;
		if (!mon_pdev->pktlog_ppdu_stats &&
		    !mon_pdev->enhanced_stats_en &&
		    !mon_pdev->tx_sniffer_enable && !mon_pdev->mcopy_mode) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->tx_sniffer_enable &&
			   !mon_pdev->mcopy_mode &&
			   !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		}
		break;
	case CDP_BPR_ENABLE:
		mon_pdev->bpr_enable = CDP_BPR_ENABLE;
		if (!mon_pdev->enhanced_stats_en &&
		    !mon_pdev->tx_sniffer_enable &&
		    !mon_pdev->mcopy_mode && !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->tx_sniffer_enable &&
			   !mon_pdev->mcopy_mode &&
			   !mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else if (mon_pdev->pktlog_ppdu_stats) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
						  pdev->pdev_id);
		}
		break;
	default:
		break;
	}

	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef ATH_SUPPORT_NAC
/*
 * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
 * @pdev: device object
 * @val: value to be set
 *
 * Return: 0 on success
 */
static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
				     bool val)
{
	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->monitor_pdev->filter_neighbour_peers = val;
	return 0;
}
#endif /* ATH_SUPPORT_NAC */
#ifdef WLAN_ATF_ENABLE
static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
	if (!pdev) {
		dp_cdp_err("Invalid pdev");
		return;
	}

	pdev->monitor_pdev->dp_atf_stats_enable = value;
}
#endif
/**
 * dp_mon_set_bsscolor() - sets bsscolor for tx capture
 * @pdev: Datapath PDEV handle
 * @bsscolor: new bsscolor
 */
static void
dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
	pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
}
/**
 * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: true on ucast filter flag set
 */
static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
		return true;

	return false;
}
/**
 * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: true on mcast filter flag set
 */
static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
		return true;

	return false;
}

/**
 * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
 * @pdev_handle: Datapath PDEV handle
 *
 * Return: true on non data filter flag set
 */
static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
	    (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
		if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
		    (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
			return true;
		}
	}

	return false;
}
#ifdef QCA_MONITOR_PKT_SUPPORT
/**
 * dp_vdev_set_monitor_mode_buf_rings() - set monitor mode buf rings
 *
 * Allocate SW descriptor pool, buffers, link descriptor memory
 * Initialize monitor related SRNGs
 *
 * @pdev: DP pdev object
 *
 * Return: void
 */
static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
	uint32_t mac_id;
	uint32_t mac_for_pdev;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct dp_soc *soc = pdev->soc;

	/* If delayed monitor replenish is disabled, allocate link descriptor
	 * monitor ring buffers for the full ring size.
	 */
	if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
		dp_vdev_set_monitor_mode_rings(pdev, false);
	} else {
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			mac_for_pdev =
				dp_get_lmac_id_for_pdev_id(pdev->soc,
							   mac_id,
							   pdev->pdev_id);

			dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
							 FALSE);
			mon_buf_ring =
				&pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
			/*
			 * Configure a low interrupt threshold when monitor
			 * mode is configured.
			 */
			if (mon_buf_ring->hal_srng) {
				num_entries = mon_buf_ring->num_entries;
				hal_set_low_threshold(mon_buf_ring->hal_srng,
						      num_entries >> 3);
				htt_srng_setup(pdev->soc->htt_handle,
					       pdev->pdev_id,
					       mon_buf_ring->hal_srng,
					       RXDMA_MONITOR_BUF);
			}
		}
	}
}
#else
static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
{
}
#endif
/*
 * dp_set_pktlog_wifi3() - set per-event pktlog configuration
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not. (True or False)
 *
 * Return: 0 on success
 */
#ifdef WDI_EVENT_ENABLE
static int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			       bool enable)
{
	struct dp_soc *soc = NULL;
	int max_mac_rings = wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;
	struct dp_mon_soc *mon_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	soc = pdev->soc;
	mon_soc = soc->monitor_soc;
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);
	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				return 0;
			}

			if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				dp_mon_filter_setup_rx_pkt_log_full(pdev);
				if (dp_mon_filter_update(pdev) !=
				    QDF_STATUS_SUCCESS) {
					dp_cdp_err("%pK: Pktlog full filters set failed", soc);
					dp_mon_filter_reset_rx_pkt_log_full(pdev);
					mon_pdev->rx_pktlog_mode =
						DP_RX_PKTLOG_DISABLED;
					return 0;
				}

				if (mon_soc->reap_timer_init &&
				    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_mod(&mon_soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_RX:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
				return 0;
			}

			if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

				/*
				 * Set the packet log lite mode filter.
				 */
				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
				if (dp_mon_filter_update(pdev) !=
				    QDF_STATUS_SUCCESS) {
					dp_cdp_err("%pK: Pktlog lite filters set failed", soc);
					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
					mon_pdev->rx_pktlog_mode =
						DP_RX_PKTLOG_DISABLED;
					return 0;
				}

				if (mon_soc->reap_timer_init &&
				    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_mod(&mon_soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				mon_pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		case WDI_EVENT_RX_CBF:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				dp_mon_info("Mon mode, CBF setting filters");
				mon_pdev->rx_pktlog_cbf = true;
				return 0;
			}

			if (!mon_pdev->rx_pktlog_cbf) {
				mon_pdev->rx_pktlog_cbf = true;
				mon_pdev->monitor_configured = true;
				dp_vdev_set_monitor_mode_buf_rings(pdev);

				/*
				 * Set the packet log lite mode filter.
				 */
				qdf_info("Non mon mode: Enable destination ring");
				dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
				if (dp_mon_filter_update(pdev) !=
				    QDF_STATUS_SUCCESS) {
					dp_mon_err("Pktlog set CBF filters failed");
					dp_mon_filter_reset_rx_pktlog_cbf(pdev);
					mon_pdev->rx_pktlog_mode =
						DP_RX_PKTLOG_DISABLED;
					mon_pdev->monitor_configured = false;
					return 0;
				}

				if (mon_soc->reap_timer_init &&
				    !dp_mon_is_enable_reap_timer_non_pkt(pdev))
					qdf_timer_mod(&mon_soc->mon_reap_timer,
						      DP_INTR_POLL_TIMER_MS);
			}
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				mon_pdev->rx_pktlog_mode =
						DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
				mon_pdev->rx_pktlog_mode =
						DP_RX_PKTLOG_DISABLED;
				dp_mon_filter_reset_rx_pkt_log_full(pdev);
				if (dp_mon_filter_update(pdev) !=
				    QDF_STATUS_SUCCESS) {
					dp_cdp_err("%pK: Pktlog filters reset failed", soc);
					return 0;
				}

				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
				if (dp_mon_filter_update(pdev) !=
				    QDF_STATUS_SUCCESS) {
					dp_cdp_err("%pK: Pktlog filters reset failed", soc);
					return 0;
				}

				if (mon_soc->reap_timer_init &&
				    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
					qdf_timer_stop(&mon_soc->mon_reap_timer);
			}
			break;

		case WDI_EVENT_LITE_T2H:
			/*
			 * Pass value 0 to disable
			 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW. Once
			 * proper macros are defined in the htt header file,
			 * they should be used here instead.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				mon_pdev->pktlog_ppdu_stats = false;
				if (!mon_pdev->enhanced_stats_en &&
				    !mon_pdev->tx_sniffer_enable &&
				    !mon_pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (mon_pdev->tx_sniffer_enable ||
					   mon_pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (mon_pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}

			break;

		case WDI_EVENT_RX_CBF:
			mon_pdev->rx_pktlog_cbf = false;
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}
	return 0;
}
#endif
/* MCL specific functions */
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @pdev: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct hif_opaque_softc *scn = soc->hif_handle;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!scn) {
		dp_mon_err("Invalid hif(scn) handle");
		return;
	}

	/* stop mon_reap_timer if it has been started */
	if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
	    mon_soc->reap_timer_init &&
	    (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);

	pktlogmod_exit(scn);
	mon_pdev->pkt_log_init = false;
}
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
#endif /* DP_CON_MON */
#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
{
	struct cdp_interface_peer_stats peer_stats_intf;
	struct cdp_peer_stats *peer_stats = &peer->stats;

	if (!peer->vdev)
		return QDF_STATUS_E_FAULT;

	qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf));
	if (peer_stats->rx.last_snr != peer_stats->rx.snr)
		peer_stats_intf.rssi_changed = true;

	if ((peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
	    (peer_stats->tx.tx_rate &&
	     peer_stats->tx.tx_rate != peer_stats->tx.last_tx_rate)) {
		qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		peer_stats_intf.vdev_id = peer->vdev->vdev_id;
		peer_stats_intf.last_peer_tx_rate = peer_stats->tx.last_tx_rate;
		peer_stats_intf.peer_tx_rate = peer_stats->tx.tx_rate;
		peer_stats_intf.peer_rssi = peer_stats->rx.snr;
		peer_stats_intf.tx_packet_count = peer_stats->tx.ucast.num;
		peer_stats_intf.rx_packet_count = peer_stats->rx.to_stack.num;
		peer_stats_intf.tx_byte_count = peer_stats->tx.tx_success.bytes;
		peer_stats_intf.rx_byte_count = peer_stats->rx.to_stack.bytes;
		peer_stats_intf.per = peer_stats->tx.last_per;
		peer_stats_intf.ack_rssi = peer_stats->tx.last_ack_rssi;
		peer_stats_intf.free_buff = INVALID_FREE_BUFF;
		dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
				     (void *)&peer_stats_intf, 0,
				     WDI_NO_VAL, dp_pdev->pdev_id);
	}

	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter() - Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * Return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
				    pdev->soc,
				    peer->neighbour_peers_macaddr.raw[0],
				    peer->neighbour_peers_macaddr.raw[1],
				    peer->neighbour_peers_macaddr.raw[2],
				    peer->neighbour_peers_macaddr.raw[3],
				    peer->neighbour_peers_macaddr.raw[4],
				    peer->neighbour_peers_macaddr.raw[5]);
			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
			return mon_pdev->mvdev;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

	return NULL;
}

static QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
					   uint8_t *rx_pkt_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->filter_neighbour_peers) {
		/* The next-hop scenario is not yet handled */
		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
		if (vdev) {
			dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
					  pdev->invalid_peer_head_msdu,
					  pdev->invalid_peer_tail_msdu);
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
/**
 * dp_update_filter_neighbour_peers() - set neighbour peers (NAC clients)
 * address for smart mesh filtering
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: Add/Del command
 * @macaddr: NAC client mac address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_mon_pdev *mon_pdev;

	if (!vdev || !macaddr)
		goto fail0;

	pdev = vdev->pdev;
	if (!pdev)
		goto fail0;

	mon_pdev = pdev->monitor_pdev;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
				sizeof(*peer));
		if (!peer) {
			dp_cdp_err("%pK: DP neighbour peer node memory allocation failed",
				   soc);
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			     macaddr, QDF_MAC_ADDR_SIZE);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer,
				  neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

		/* first neighbour */
		if (!mon_pdev->neighbour_peers_added) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			mon_pdev->neighbour_peers_added = true;
			dp_mon_filter_setup_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: smart mon filter setup failed",
					   soc);
				dp_mon_filter_reset_smart_monitor(pdev);
				mon_pdev->neighbour_peers_added = false;
			}
		}
	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
					 macaddr, QDF_MAC_ADDR_SIZE)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
					     peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}

		/* last neighbour deleted */
		if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			dp_mon_filter_reset_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: smart mon filter clear failed",
					   soc);
			}
			mon_pdev->neighbour_peers_added = false;
		}

		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 1;

fail0:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 0;
}
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
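
/*
 * Illustrative call flow (a hedged sketch, not code in this file): a
 * control-path component holding the cdp ops would add and later remove a
 * NAC client through the hook installed by dp_mon_cdp_ops_register() below:
 *
 *	ops->ctrl_ops->txrx_update_filter_neighbour_peers(soc_hdl, vdev_id,
 *							  DP_NAC_PARAM_ADD,
 *							  client_mac);
 *	...
 *	ops->ctrl_ops->txrx_update_filter_neighbour_peers(soc_hdl, vdev_id,
 *							  DP_NAC_PARAM_DEL,
 *							  client_mac);
 *
 * The first ADD arms the smart monitor filter; deleting the last client
 * tears it down again.
 */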

#ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi() - Retrieve the stored RSSI for a configured NAC
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @mac_addr: neighbour mac
 * @rssi: out parameter for the rssi value
 *
 * Return: QDF_STATUS_SUCCESS if the neighbour was found,
 * QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
					     uint8_t vdev_id,
					     char *mac_addr,
					     uint8_t *rssi)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_mon_pdev *mon_pdev;

	if (!vdev)
		return status;

	pdev = vdev->pdev;
	mon_pdev = pdev->monitor_pdev;
	*rssi = 0;

	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
			*rssi = peer->rssi;
			status = QDF_STATUS_SUCCESS;
			break;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}
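
/**
 * dp_config_for_nac_rssi() - configure NAC RSSI measurement for a client
 * @cdp_soc: cdp soc handle
 * @vdev_id: id of the vdev on which NAC RSSI is configured
 * @cmd: CDP_NAC_PARAM_ADD/CDP_NAC_PARAM_DEL command
 * @bssid: BSSID to be programmed in FW for NAC RSSI
 * @client_macaddr: NAC client mac address
 * @chan_num: channel number the client is on
 *
 * Return: QDF_STATUS
 */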
static QDF_STATUS
dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
		       uint8_t vdev_id,
		       enum cdp_nac_param_cmd cmd, char *bssid,
		       char *client_macaddr,
		       uint8_t chan_num)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;
	struct dp_mon_pdev *mon_pdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = (struct dp_pdev *)vdev->pdev;
	mon_pdev = pdev->monitor_pdev;
	mon_pdev->nac_rssi_filtering = 1;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == CDP_NAC_PARAM_ADD) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_ADD,
						 (uint8_t *)client_macaddr);
	} else if (cmd == CDP_NAC_PARAM_DEL) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_DEL,
						 (uint8_t *)client_macaddr);
	}

	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
			(soc->ctrl_psoc, pdev->pdev_id,
			 vdev->vdev_id, cmd, bssid, client_macaddr);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * dp_cfr_filter() - Configure host RX monitor status ring for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @enable: Enable/Disable CFR
 * @filter_val: Flag to select filter for monitor mode
 *
 * Return: none
 */
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
			  uint8_t pdev_id,
			  bool enable,
			  struct cdp_monitor_filter *filter_val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	int max_mac_rings;
	uint8_t mac_id = 0;
	struct dp_mon_pdev *mon_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return;
	}

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->mvdev) {
		dp_mon_info("No action is needed since mon mode is enabled");
		return;
	}

	soc = pdev->soc;
	pdev->cfr_rcc_mode = false;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	dp_mon_debug("Max_mac_rings %d", max_mac_rings);
	dp_mon_info("enable : %d, mode: 0x%x", enable, filter_val->mode);

	if (enable) {
		pdev->cfr_rcc_mode = true;

		htt_tlv_filter.ppdu_start = 1;
		htt_tlv_filter.ppdu_end = 1;
		htt_tlv_filter.ppdu_end_user_stats = 1;
		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
		htt_tlv_filter.ppdu_end_status_done = 1;
		htt_tlv_filter.mpdu_start = 1;
		htt_tlv_filter.offset_valid = false;

		htt_tlv_filter.enable_fp =
			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
		htt_tlv_filter.enable_md = 0;
		htt_tlv_filter.enable_mo =
			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
	}

	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
		int mac_for_pdev =
			dp_get_mac_id_for_pdev(mac_id,
					       pdev->pdev_id);

		htt_h2t_rx_ring_cfg(soc->htt_handle,
				    mac_for_pdev,
				    soc->rxdma_mon_status_ring[mac_id]
				    .hal_srng,
				    RXDMA_MONITOR_STATUS,
				    RX_MON_STATUS_BUF_SIZE,
				    &htt_tlv_filter);
	}
}

/**
 * dp_enable_mon_reap_timer() - enable/disable reap timer
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of objmgr pdev
 * @enable: Enable/Disable reap timer of monitor status ring
 *
 * Return: none
 */
static void
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = NULL;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return;
	}

	mon_pdev = pdev->monitor_pdev;
	mon_pdev->enable_reap_timer_non_pkt = enable;
	if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
		dp_mon_debug("pktlog enabled %d", mon_pdev->rx_pktlog_mode);
		return;
	}

	if (!mon_soc->reap_timer_init) {
		dp_mon_err("reap timer not init");
		return;
	}

	if (enable)
		qdf_timer_mod(&mon_soc->mon_reap_timer,
			      DP_INTR_POLL_TIMER_MS);
	else
		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
}
#endif

#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!handle) {
		dp_mon_err("pdev handle is NULL");
		return;
	}

	mon_pdev = handle->monitor_pdev;

	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log already initialized", soc);
		return;
	}

	pktlog_sethandle(&mon_pdev->pl_dev, scn);
	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		mon_pdev->pkt_log_init = false;
	} else {
		mon_pdev->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev handle is NULL");
		return;
	}

	dp_pktlogmod_exit(pdev);
}
#else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
}

static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
}
#endif
#endif

/**
 * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
 * @pdev: device object
 *
 * Return: void
 */
static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
{
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
			   neighbour_peer_list_elem, temp_peer) {
		/* delete this peer from the list */
		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
			     peer, neighbour_peer_list_elem);
		qdf_mem_free(peer);
	}

	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
}

/**
 * is_ppdu_txrx_capture_enabled() - Check whether pktlog and debug_sniffer
 * modes are both disabled
 * @pdev: dp pdev handle
 *
 * Return: true when none of pktlog ppdu stats, tx sniffer and mcopy modes
 * are enabled, false otherwise
 */
static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	return !mon_pdev->pktlog_ppdu_stats && !mon_pdev->tx_sniffer_enable &&
	       !mon_pdev->mcopy_mode;
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->enhanced_stats_en == 0)
		dp_cal_client_timer_start(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 1;

	dp_mon_filter_setup_enhanced_stats(mon_pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
		dp_mon_filter_reset_enhanced_stats(mon_pdev);
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
		mon_pdev->enhanced_stats_en = 0;
		return QDF_STATUS_E_FAILURE;
	}

	pdev->enhanced_stats_en = true;

	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
					  pdev->pdev_id);
	} else if (is_ppdu_txrx_capture_enabled(pdev) &&
		   mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_BPR_ENH,
					  pdev->pdev_id);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 * @soc: the soc handle
 * @pdev_id: pdev_id of pdev
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->enhanced_stats_en == 1)
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 0;
	pdev->enhanced_stats_en = false;

	if (is_ppdu_txrx_capture_enabled(pdev) && !mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
	} else if (is_ppdu_txrx_capture_enabled(pdev) && mon_pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_BPR,
					  pdev->pdev_id);
	}

	dp_mon_filter_reset_enhanced_stats(mon_pdev);
	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to reset enhanced mode filters"));
	}

	return QDF_STATUS_SUCCESS;
}
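
/*
 * Illustrative usage (a hedged sketch): user-space stats control typically
 * reaches the two handlers above through the cdp host_stats_ops installed in
 * dp_mon_cdp_ops_register():
 *
 *	ops->host_stats_ops->txrx_enable_enhanced_stats(soc_hdl, pdev_id);
 *	... collect per-peer enhanced stats ...
 *	ops->host_stats_ops->txrx_disable_enhanced_stats(soc_hdl, pdev_id);
 *
 * Enable starts the cal client timer and programs the monitor filters;
 * disable reverses both steps.
 */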

#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
				    struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	struct cdp_interface_peer_qos_stats qos_stats_intf;

	if (ppdu_user->peer_id == HTT_INVALID_PEER) {
		dp_mon_err("Invalid peer id");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&qos_stats_intf, sizeof(qos_stats_intf));

	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
		     QDF_MAC_ADDR_SIZE);
	qos_stats_intf.frame_control = ppdu_user->frame_control;
	qos_stats_intf.frame_control_info_valid =
			ppdu_user->frame_control_info_valid;
	qos_stats_intf.qos_control = ppdu_user->qos_control;
	qos_stats_intf.qos_control_info_valid =
			ppdu_user->qos_control_info_valid;
	qos_stats_intf.vdev_id = ppdu_user->vdev_id;
	dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
			     (void *)&qos_stats_intf, 0,
			     WDI_NO_VAL, dp_pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
			 struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#endif /* QCA_ENHANCED_STATS_SUPPORT */

/**
 * dp_enable_peer_based_pktlog() - Set flag for peer based filtering
 * for pktlog
 * @soc: cdp_soc handle
 * @pdev_id: id of dp pdev handle
 * @mac_addr: Peer mac address
 * @enb_dsb: Enable or disable peer based filtering
 *
 * Return: QDF_STATUS
 */
static int
dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
			    uint8_t *mac_addr, uint8_t enb_dsb)
{
	struct dp_peer *peer;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer) {
		dp_mon_err("Invalid Peer");
		return QDF_STATUS_E_FAILURE;
	}

	peer->peer_based_pktlog_filter = enb_dsb;
	mon_pdev->dp_peer_based_pktlog = enb_dsb;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_update_pkt_capture_params() - Set Rx & Tx capture flags for a peer
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 * Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS
 */
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
static QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac)
{
	struct dp_peer *peer;
	QDF_STATUS status;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
				      peer_mac, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
	if (!peer)
		return QDF_STATUS_E_FAILURE;

	/* we need to set tx pkt capture for non associated peer */
	status = dp_peer_set_tx_capture_enabled(pdev, peer,
						is_tx_pkt_cap_enable,
						peer_mac);

	status = dp_peer_set_rx_capture_enabled(pdev, peer,
						is_rx_pkt_cap_enable,
						peer_mac);
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif

#if defined(QCA_MONITOR_PKT_SUPPORT) || defined(QCA_MCOPY_SUPPORT)
/**
 * dp_vdev_set_monitor_mode_rings() - set monitor mode rings
 * @pdev: DP pdev object
 * @delayed_replenish: replenish monitor buffers lazily when set
 *
 * Allocate SW descriptor pool, buffers and link descriptor memory.
 * Initialize monitor related SRNGs.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
						 uint8_t delayed_replenish)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	uint32_t mac_id;
	uint32_t mac_for_pdev;
	struct dp_soc *soc = pdev->soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_srng *mon_buf_ring;
	uint32_t num_entries;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	/* If monitor rings are already initialized, return from here */
	if (mon_pdev->pdev_mon_init)
		return QDF_STATUS_SUCCESS;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
							  pdev->pdev_id);

		/* Allocate sw rx descriptor pool for mon RxDMA buffer ring */
		status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			dp_mon_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed",
				   __func__);
			goto fail0;
		}

		dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);

		/* If monitor buffers are already allocated,
		 * do not allocate.
		 */
		status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
							  delayed_replenish);

		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
		/*
		 * Configure low interrupt threshold when monitor mode is
		 * configured.
		 */
		if (mon_buf_ring->hal_srng) {
			num_entries = mon_buf_ring->num_entries;
			hal_set_low_threshold(mon_buf_ring->hal_srng,
					      num_entries >> 3);
			htt_srng_setup(pdev->soc->htt_handle,
				       pdev->pdev_id,
				       mon_buf_ring->hal_srng,
				       RXDMA_MONITOR_BUF);
		}

		/* Allocate link descriptors for the mon link descriptor ring */
		status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			dp_mon_err("%s: dp_hw_link_desc_pool_banks_alloc() failed",
				   __func__);
			goto fail0;
		}
		dp_link_desc_ring_replenish(soc, mac_for_pdev);

		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
			       soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng,
			       RXDMA_MONITOR_DESC);
		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
			       soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng,
			       RXDMA_MONITOR_DST);
	}

	mon_pdev->pdev_mon_init = 1;
	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_mon_vdev_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_mon_vdev_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done = 0, total_work_done = 0;
	int budget = 0xffff;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (mon_pdev->mon_chan_band != REG_BAND_UNKNOWN)
		lmac_id =
			pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];

	start_time = qdf_get_log_timestamp();
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_mon_process(soc, NULL,
							   lmac_iter,
							   remaining_quota);
			else
				work_done =
					dp_mon_drop_packets_for_mac(pdev,
								    lmac_iter,
								    remaining_quota);
			if (work_done) {
				budget -= work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&mon_soc->mon_vdev_timer, 1);
	else
		qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
}

/* MCL specific functions */
#if defined(DP_CON_MON)
/**
 * dp_mon_reap_timer_handler() - timer to reap monitor rings;
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_mon_reap_timer_handler(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);

	qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
#endif
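
/*
 * Reap/vdev timer helpers: thin wrappers around qdf_timer_*() that guard
 * against use before init. The reap timer periodically services the monitor
 * status rings on targets without ppdu-end interrupts; the vdev timer polls
 * the monitor rings when monitor mode runs in timer context.
 */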
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_mon_reap_timer_init(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	qdf_timer_init(soc->osdev, &mon_soc->mon_reap_timer,
		       dp_mon_reap_timer_handler, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	mon_soc->reap_timer_init = 1;
}
#else
static void dp_mon_reap_timer_init(struct dp_soc *soc)
{
}
#endif

static void dp_mon_reap_timer_deinit(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->reap_timer_init) {
		qdf_timer_free(&mon_soc->mon_reap_timer);
		mon_soc->reap_timer_init = 0;
	}
}

static void dp_mon_reap_timer_start(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->reap_timer_init)
		qdf_timer_mod(&mon_soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

static bool dp_mon_reap_timer_stop(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->reap_timer_init) {
		qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
		return true;
	}

	return false;
}

static void dp_mon_vdev_timer_init(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	qdf_timer_init(soc->osdev, &mon_soc->mon_vdev_timer,
		       dp_mon_vdev_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);
	mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_INIT;
}

static void dp_mon_vdev_timer_deinit(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
		qdf_timer_free(&mon_soc->mon_vdev_timer);
		mon_soc->mon_vdev_timer_state = 0;
	}
}

static void dp_mon_vdev_timer_start(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
		qdf_timer_mod(&mon_soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
		mon_soc->mon_vdev_timer_state |= MON_VDEV_TIMER_RUNNING;
	}
}

static bool dp_mon_vdev_timer_stop(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	if (mon_soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) {
		qdf_timer_sync_cancel(&mon_soc->mon_vdev_timer);
		mon_soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING;
		return true;
	}

	return false;
}

#ifdef QCA_MCOPY_SUPPORT
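/**
 * dp_mcopy_check_deliver() - check whether an msdu should be delivered in
 * mcopy mode
 * @pdev: DP pdev handle
 * @peer_id: peer id of the msdu
 * @ppdu_id: ppdu id of the msdu
 * @first_msdu: flag indicating the first msdu of a ppdu
 *
 * In M_COPY mode only one msdu per ppdu/peer pair is delivered; subsequent
 * msdus of the same ppdu are rejected.
 *
 * Return: QDF_STATUS_SUCCESS to deliver, QDF_STATUS_E_INVAL to drop
 */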
static QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
					 uint16_t peer_id,
					 uint32_t ppdu_id,
					 uint8_t first_msdu)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->mcopy_mode) {
		if (mon_pdev->mcopy_mode == M_COPY) {
			if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
			    (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
				return QDF_STATUS_E_INVAL;
			}
		}

		if (!first_msdu)
			return QDF_STATUS_E_INVAL;

		mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		mon_pdev->m_copy_id.tx_peer_id = peer_id;
	}

	return QDF_STATUS_SUCCESS;
}
#endif
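
/**
 * dp_mon_neighbour_peer_add_ast() - add a WDS AST entry when a configured
 * NAC client roams behind the transmitting peer
 * @pdev: DP pdev handle
 * @ta_peer: transmitter peer
 * @mac_addr: buffer to hold the source mac address copied from the frame
 * @nbuf: received frame
 * @flags: flags passed to dp_peer_add_ast()
 *
 * Return: none
 */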
static void dp_mon_neighbour_peer_add_ast(struct dp_pdev *pdev,
					  struct dp_peer *ta_peer,
					  uint8_t *mac_addr,
					  qdf_nbuf_t nbuf,
					  uint32_t flags)
{
	struct dp_neighbour_peer *neighbour_peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_soc *soc = pdev->soc;
	uint32_t ret = 0;

	if (mon_pdev->neighbour_peers_added) {
		qdf_mem_copy(mac_addr,
			     (qdf_nbuf_data(nbuf) +
			      QDF_MAC_ADDR_SIZE),
			     QDF_MAC_ADDR_SIZE);

		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(neighbour_peer,
			      &mon_pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&neighbour_peer->neighbour_peers_macaddr,
					 mac_addr,
					 QDF_MAC_ADDR_SIZE)) {
				ret = dp_peer_add_ast(soc,
						      ta_peer,
						      mac_addr,
						      CDP_TXRX_AST_TYPE_WDS,
						      flags);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "sa valid and nac roamed to wds");
				break;
			}
		}
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	}
}

#ifdef WDI_EVENT_ENABLE
#ifndef REMOVE_PKT_LOG
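/**
 * dp_get_pldev() - get the pktlog device handle of a pdev
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: pktlog device handle, or NULL if unavailable
 */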
static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev || !pdev->monitor_pdev)
		return NULL;

	return pdev->monitor_pdev->pl_dev;
}
#else
static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	return NULL;
}
#endif
#endif
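
/**
 * dp_rx_populate_cbf_hdr() - send a CBF frame to upper layers wrapped in a
 * synthesized PPDU stats indication
 * @soc: core txrx main context
 * @mac_id: mac id on which the frame was received
 * @event: WDI event to raise
 * @mpdu: CBF frame buffer
 * @msdu_timestamp: time stamp of the received frame
 *
 * A PPDU_STATS_IND header and an RX_MGMTCTRL_PAYLOAD TLV are pushed in front
 * of the frame (payload lengths rounded up to 4-byte alignment) before the
 * WDI event is raised, and popped again afterwards.
 *
 * Return: QDF_STATUS
 */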
QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
				  uint32_t mac_id,
				  uint32_t event,
				  qdf_nbuf_t mpdu,
				  uint32_t msdu_timestamp)
{
	uint32_t data_size, hdr_size, ppdu_id, align4byte;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t *msg_word;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;

	hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
		+ qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);

	data_size = qdf_nbuf_len(mpdu);

	qdf_nbuf_push_head(mpdu, hdr_size);

	msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
	/*
	 * Populate the PPDU Stats Indication header
	 */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
	HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
	HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
	align4byte = ((data_size +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
	msg_word++;
	HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
	msg_word++;

	*msg_word = msdu_timestamp;
	msg_word++;
	/* Skip reserved field */
	msg_word++;
	/*
	 * Populate MGMT_CTRL Payload TLV first
	 */
	HTT_STATS_TLV_TAG_SET(*msg_word,
			      HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);

	align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
	msg_word++;

	HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
		*msg_word, data_size);
	msg_word++;

	dp_wdi_event_handler(event, soc, (void *)mpdu,
			     HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);

	qdf_nbuf_pull_head(mpdu, hdr_size);

	return QDF_STATUS_SUCCESS;
}

#ifdef ATH_SUPPORT_EXT_STAT
/**
 * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: none
 */
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
				 struct dp_peer *peer,
				 void *arg)
{
	dp_cal_client_update_peer_stats(&peer->stats);
}

/**
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 *
 * Return: none
 */
static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;

	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
			     DP_MOD_ID_CDP);
}
#else
static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
}
#endif
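
/**
 * dp_mon_soc_cfg_init() - target-specific monitor soc configuration
 * @soc: DP soc handle
 *
 * Programs delayed-replenish entries, HW NAC monitor support and the
 * monitor mode version based on the target type.
 *
 * Return: QDF_STATUS
 */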
QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
{
	int target_type;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_WCN7850:
		/* do nothing */
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
			dp_config_full_mon_mode((struct cdp_soc_t *)soc, 1);
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9224:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		mon_soc->monitor_mode_v2 = 1;
		break;
	default:
		dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	dp_mon_info("hw_nac_monitor_support = %d",
		    mon_soc->hw_nac_monitor_support);

	return QDF_STATUS_SUCCESS;
}
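
/**
 * dp_mon_pdev_attach() - allocate the monitor pdev and its rings and
 * descriptor pools
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS
 */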
QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		goto fail0;
	}

	soc = pdev->soc;

	mon_pdev = (struct dp_mon_pdev *)qdf_mem_malloc(sizeof(*mon_pdev));
	if (!mon_pdev) {
		dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
		goto fail0;
	}

	if (dp_mon_rings_alloc(soc, pdev)) {
		dp_mon_err("%pK: MONITOR rings setup failed", pdev);
		goto fail1;
	}

	/* Rx monitor mode specific init */
	if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
		dp_mon_err("%pK: dp_rx_pdev_mon_desc_pool_alloc failed",
			   pdev);
		goto fail2;
	}

	pdev->monitor_pdev = mon_pdev;

	return QDF_STATUS_SUCCESS;
fail2:
	dp_mon_rings_free(pdev);
fail1:
	pdev->monitor_pdev = NULL;
	qdf_mem_free(mon_pdev);
fail0:
	return QDF_STATUS_E_NOMEM;
}
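
/**
 * dp_mon_pdev_detach() - free the monitor pdev memory allocated in attach
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS
 */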
QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_pdev = pdev->monitor_pdev;
	dp_rx_pdev_mon_desc_pool_free(pdev);
	dp_mon_rings_free(pdev);
	pdev->monitor_pdev = NULL;
	qdf_mem_free(mon_pdev);

	return QDF_STATUS_SUCCESS;
}
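
/**
 * dp_mon_pdev_init() - initialize the monitor pdev: locks, filters, lists,
 * cal client timer, ppdu stats and monitor rings
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS
 */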
QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = pdev->soc;
	mon_pdev = pdev->monitor_pdev;

	mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
	if (!mon_pdev->filter) {
		dp_mon_err("%pK: Memory allocation failed for monitor filter",
			   pdev);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
	qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
	mon_pdev->monitor_configured = false;
	mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;

	TAILQ_INIT(&mon_pdev->neighbour_peers_list);
	mon_pdev->neighbour_peers_added = false;

	/* Monitor filter init */
	mon_pdev->mon_filter_mode = MON_FILTER_ALL;
	mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->fp_data_filter = FILTER_DATA_ALL;
	mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->mo_data_filter = FILTER_DATA_ALL;

	/*
	 * initialize ppdu tlv list
	 */
	TAILQ_INIT(&mon_pdev->ppdu_info_list);
	TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
	mon_pdev->list_depth = 0;
	mon_pdev->tlv_count = 0;

	/* initialize cal client timer */
	dp_cal_client_attach(&mon_pdev->cal_client_ctx,
			     dp_pdev_to_cdp_pdev(pdev),
			     pdev->soc->osdev,
			     &dp_iterate_update_peer_list);
	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
		goto fail0;

	if (dp_mon_rings_init(soc, pdev)) {
		dp_mon_err("%pK: MONITOR rings setup failed", pdev);
		goto fail1;
	}

	/* initialize sw monitor rx descriptors */
	dp_rx_pdev_mon_desc_pool_init(pdev);

	/* allocate buffers and replenish the monitor RxDMA ring */
	dp_rx_pdev_mon_buffers_alloc(pdev);

	dp_tx_ppdu_stats_attach(pdev);
	mon_pdev->is_dp_mon_pdev_initialized = true;

	return QDF_STATUS_SUCCESS;

fail1:
	dp_htt_ppdu_stats_detach(pdev);
fail0:
	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	dp_mon_filter_dealloc(mon_pdev);
	return QDF_STATUS_E_FAILURE;
}
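
/**
 * dp_mon_pdev_deinit() - undo dp_mon_pdev_init()
 * @pdev: DP pdev handle
 *
 * Return: QDF_STATUS
 */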
QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->is_dp_mon_pdev_initialized)
		return QDF_STATUS_SUCCESS;

	dp_tx_ppdu_stats_detach(pdev);
	dp_rx_pdev_mon_buffers_free(pdev);
	dp_rx_pdev_mon_desc_pool_deinit(pdev);
	dp_mon_rings_deinit(pdev);
	dp_cal_client_detach(&mon_pdev->cal_client_ctx);
	dp_htt_ppdu_stats_detach(pdev);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	dp_neighbour_peers_detach(pdev);
	dp_pktlogmod_exit(pdev);
	if (mon_pdev->filter)
		dp_mon_filter_dealloc(mon_pdev);
	mon_pdev->is_dp_mon_pdev_initialized = false;

	return QDF_STATUS_SUCCESS;
}
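
/**
 * dp_mon_vdev_attach() - allocate and attach the monitor vdev
 * @vdev: DP vdev handle
 *
 * Return: QDF_STATUS
 */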
static QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_pdev *pdev = vdev->pdev;

	mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
	if (!mon_vdev) {
		dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_attach(mon_vdev);

	vdev->monitor_vdev = mon_vdev;

	return QDF_STATUS_SUCCESS;
}
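
/**
 * dp_mon_vdev_detach() - detach and free the monitor vdev
 * @vdev: DP vdev handle
 *
 * Return: QDF_STATUS
 */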
static QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
	struct dp_pdev *pdev = vdev->pdev;

	if (!mon_vdev)
		return QDF_STATUS_E_FAILURE;

	if (pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_detach(mon_vdev);

	qdf_mem_free(mon_vdev);
	vdev->monitor_vdev = NULL;
	/* set mvdev to NULL only if detach is called for monitor/special vap
	 */
	if (pdev->monitor_pdev->mvdev == vdev)
		pdev->monitor_pdev->mvdev = NULL;

	return QDF_STATUS_SUCCESS;
}
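
/**
 * dp_mon_peer_attach() - allocate and attach the monitor peer
 * @peer: DP peer handle
 *
 * Return: QDF_STATUS
 */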
static QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer;
	struct dp_pdev *pdev;

	mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
	if (!mon_peer) {
		dp_mon_err("%pK: MONITOR peer allocation failed", peer);
		return QDF_STATUS_E_NOMEM;
	}

	peer->monitor_peer = mon_peer;
	pdev = peer->vdev->pdev;
	/*
	 * In tx_monitor mode, a filter may be set for an unassociated peer;
	 * when that peer becomes associated, the tx_cap_enabled flag must be
	 * updated to support peer filtering.
	 */
	dp_peer_tx_capture_filter_check(pdev, peer);

	return QDF_STATUS_SUCCESS;
}
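
/**
 * dp_mon_peer_detach() - detach and free the monitor peer
 * @peer: DP peer handle
 *
 * Return: QDF_STATUS
 */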
static QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	qdf_mem_free(mon_peer);
	peer->monitor_peer = NULL;

	return QDF_STATUS_SUCCESS;
}
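
/*
 * monitor_ops: dispatch table through which the core datapath invokes
 * monitor functionality; installed on the monitor soc by
 * dp_mon_ops_register(). Optional features contribute entries only when the
 * corresponding compile-time flag is set.
 */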
static struct dp_mon_ops monitor_ops = {
	.mon_soc_cfg_init = dp_mon_soc_cfg_init,
	.mon_pdev_attach = dp_mon_pdev_attach,
	.mon_pdev_detach = dp_mon_pdev_detach,
	.mon_pdev_init = dp_mon_pdev_init,
	.mon_pdev_deinit = dp_mon_pdev_deinit,
	.mon_vdev_attach = dp_mon_vdev_attach,
	.mon_vdev_detach = dp_mon_vdev_detach,
	.mon_peer_attach = dp_mon_peer_attach,
	.mon_peer_detach = dp_mon_peer_detach,
	.mon_config_debug_sniffer = dp_config_debug_sniffer,
	.mon_flush_rings = dp_flush_monitor_rings,
#if !defined(DISABLE_MON_CONFIG)
	.mon_htt_srng_setup = dp_mon_htt_srng_setup,
#endif
#if defined(DP_CON_MON)
	.mon_service_rings = dp_service_mon_rings,
#endif
#ifndef DISABLE_MON_CONFIG
	.mon_process = dp_mon_process,
#endif
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
	.mon_drop_packets_for_mac = dp_mon_drop_packets_for_mac,
#endif
	.mon_peer_tx_init = dp_peer_tx_init,
	.mon_peer_tx_cleanup = dp_peer_tx_cleanup,
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.mon_peer_tid_peer_id_update = dp_peer_tid_peer_id_update,
	.mon_tx_ppdu_stats_attach = dp_tx_ppdu_stats_attach,
	.mon_tx_ppdu_stats_detach = dp_tx_ppdu_stats_detach,
	.mon_tx_capture_debugfs_init = dp_tx_capture_debugfs_init,
	.mon_tx_add_to_comp_queue = dp_tx_add_to_comp_queue,
	.mon_peer_tx_capture_filter_check = dp_peer_tx_capture_filter_check,
	.mon_update_msdu_to_list = dp_update_msdu_to_list,
#endif
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
	.mon_ppdu_stats_ind_handler = dp_ppdu_stats_ind_handler,
#endif
	.mon_htt_ppdu_stats_attach = dp_htt_ppdu_stats_attach,
	.mon_htt_ppdu_stats_detach = dp_htt_ppdu_stats_detach,
	.mon_print_pdev_rx_mon_stats = dp_print_pdev_rx_mon_stats,
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.mon_print_pdev_tx_capture_stats = dp_print_pdev_tx_capture_stats,
	.mon_config_enh_tx_capture = dp_config_enh_tx_capture,
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	.mon_config_enh_rx_capture = dp_config_enh_rx_capture,
#endif
#ifdef QCA_SUPPORT_BPR
	.mon_set_bpr_enable = dp_set_bpr_enable,
#endif
#ifdef ATH_SUPPORT_NAC
	.mon_set_filter_neigh_peers = dp_set_filter_neigh_peers,
#endif
#ifdef WLAN_ATF_ENABLE
	.mon_set_atf_stats_enable = dp_set_atf_stats_enable,
#endif
	.mon_set_bsscolor = dp_mon_set_bsscolor,
	.mon_pdev_get_filter_ucast_data = dp_pdev_get_filter_ucast_data,
	.mon_pdev_get_filter_mcast_data = dp_pdev_get_filter_mcast_data,
	.mon_pdev_get_filter_non_data = dp_pdev_get_filter_non_data,
#ifdef WDI_EVENT_ENABLE
	.mon_set_pktlog_wifi3 = dp_set_pktlog_wifi3,
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	.mon_pktlogmod_exit = dp_pktlogmod_exit,
#endif
	.mon_vdev_set_monitor_mode_buf_rings =
			dp_vdev_set_monitor_mode_buf_rings,
	.mon_neighbour_peers_detach = dp_neighbour_peers_detach,
#ifdef FEATURE_NAC_RSSI
	.mon_filter_neighbour_peer = dp_filter_neighbour_peer,
#endif
	.mon_vdev_timer_init = dp_mon_vdev_timer_init,
	.mon_vdev_timer_start = dp_mon_vdev_timer_start,
	.mon_vdev_timer_stop = dp_mon_vdev_timer_stop,
	.mon_vdev_timer_deinit = dp_mon_vdev_timer_deinit,
	.mon_reap_timer_init = dp_mon_reap_timer_init,
	.mon_reap_timer_start = dp_mon_reap_timer_start,
	.mon_reap_timer_stop = dp_mon_reap_timer_stop,
	.mon_reap_timer_deinit = dp_mon_reap_timer_deinit,
#ifdef QCA_MCOPY_SUPPORT
	.mon_mcopy_check_deliver = dp_mcopy_check_deliver,
#endif
	.mon_neighbour_peer_add_ast = dp_mon_neighbour_peer_add_ast,
};

static struct cdp_mon_ops dp_ops_mon = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = dp_config_full_mon_mode,
};
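
/**
 * dp_mon_ops_register() - install the monitor ops table on the monitor soc
 * @mon_soc: monitor soc handle
 *
 * Return: none
 */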
void dp_mon_ops_register(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops = &monitor_ops;
}
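
/**
 * dp_mon_cdp_ops_register() - hook monitor handlers into the cdp ops tables
 * @soc: DP soc handle
 *
 * Return: none
 */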
void dp_mon_cdp_ops_register(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	ops->mon_ops = &dp_ops_mon;
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	ops->cfr_ops->txrx_cfr_filter = dp_cfr_filter;
	ops->cfr_ops->txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer;
#endif
	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
				dp_get_mon_vdev_from_pdev_wifi3;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi;
	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
				dp_vdev_get_neighbour_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	ops->ctrl_ops->txrx_update_filter_neighbour_peers =
				dp_update_filter_neighbour_peers;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	ops->ctrl_ops->enable_peer_based_pktlog =
				dp_enable_peer_based_pktlog;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
				dp_peer_update_pkt_capture_params;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef QCA_ENHANCED_STATS_SUPPORT
	ops->host_stats_ops->txrx_enable_enhanced_stats =
					dp_enable_enhanced_stats;
	ops->host_stats_ops->txrx_disable_enhanced_stats =
					dp_disable_enhanced_stats;
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
#endif
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
					dp_get_scan_spcl_vap_stats;
#endif
}
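
/**
 * dp_mon_cdp_ops_deregister() - clear the cdp ops hooks installed by
 * dp_mon_cdp_ops_register()
 * @soc: DP soc handle
 *
 * Return: none
 */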
void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	ops->mon_ops = NULL;
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	ops->cfr_ops->txrx_cfr_filter = NULL;
	ops->cfr_ops->txrx_enable_mon_reap_timer = NULL;
#endif
	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = NULL;
	ops->misc_ops->pkt_log_con_service = NULL;
	ops->misc_ops->pkt_log_exit = NULL;
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL;
	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef QCA_ENHANCED_STATS_SUPPORT
	ops->host_stats_ops->txrx_enable_enhanced_stats = NULL;
	ops->host_stats_ops->txrx_disable_enhanced_stats = NULL;
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = NULL;
#endif
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats = NULL;
#endif
}
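
/**
 * dp_mon_soc_attach() - allocate the monitor soc and register monitor ops
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS
 */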
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
	if (!mon_soc) {
		dp_mon_err("%pK: mem allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	/* register monitor ops */
	dp_mon_ops_register(mon_soc);
	soc->monitor_soc = mon_soc;

	dp_mon_cdp_ops_register(soc);

	return QDF_STATUS_SUCCESS;
}
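
/**
 * dp_mon_soc_detach() - detach and free the monitor soc
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS
 */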
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = soc->monitor_soc;
	dp_mon_vdev_timer_deinit(soc);
	dp_mon_cdp_ops_deregister(soc);
	soc->monitor_soc = NULL;
	qdf_mem_free(mon_soc);

	return QDF_STATUS_SUCCESS;
}