dp_mon.c

/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_internal.h>
#include "htt_ppdu_stats.h"
#include "dp_cal_client_api.h"
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif
#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif

#define DP_INTR_POLL_TIMER_MS	5
#define INVALID_FREE_BUFF	0xffffffff

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define MAX_STRING_LEN_PER_FIELD 6
#define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
#endif

#ifdef QCA_MCOPY_SUPPORT
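/**
 * dp_pdev_disable_mcopy_code() - disable mcopy mode on the pdev
 * @pdev: Datapath PDEV handle
 *
 * Clears the mcopy mode flag and drops the monitor vdev reference.
 *
 * Return: void
 */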
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->mcopy_mode = M_COPY_DISABLED;
	mon_pdev->mvdev = NULL;
}
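
/**
 * dp_reset_mcopy_mode() - disable mcopy mode and reset its monitor filters
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */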
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mcopy_mode) {
		cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
		if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
			cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
							  DP_FULL_MON_ENABLE);
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to reset M copy mode filters"));
		}
		mon_pdev->monitor_configured = false;
	}
}
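
/**
 * dp_config_mcopy_mode() - enable mcopy mode with the given configuration
 * @pdev: Datapath PDEV handle
 * @val: mcopy mode configuration value
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */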
static QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;
	struct cdp_mon_ops *cdp_ops;

	if (mon_pdev->mvdev)
		return QDF_STATUS_E_RESOURCES;

	mon_pdev->mcopy_mode = val;
	mon_pdev->tx_sniffer_enable = 0;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
		if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings)
			mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
	}

	/*
	 * Setup the M copy mode filter.
	 */
	cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mcopy_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set M_copy mode filters"));
		dp_mon_filter_reset_mcopy_mode(pdev);
		dp_pdev_disable_mcopy_code(pdev);
		return status;
	}

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return status;
}
#else
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_MCOPY_SUPPORT */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
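/**
 * dp_reset_undecoded_metadata_capture() - disable undecoded metadata capture
 * and reset its monitor filters
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */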
static QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->undecoded_metadata_capture) {
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		status = dp_mon_filter_update(pdev);
		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Undecoded capture filter reset failed"));
		}
	}
	mon_pdev->undecoded_metadata_capture = 0;

	return status;
}
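
/**
 * dp_enable_undecoded_metadata_capture() - enable undecoded metadata capture
 * @pdev: Datapath PDEV handle
 * @val: undecoded metadata capture configuration value
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */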
static QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops;

	if (!mon_pdev->mvdev) {
		qdf_err("monitor vdev is NULL");
		return QDF_STATUS_E_RESOURCES;
	}

	mon_pdev->undecoded_metadata_capture = val;
	mon_pdev->monitor_configured = true;

	mon_ops = dp_mon_ops_get(pdev->soc);

	/* Setup the undecoded metadata capture mode filter. */
	dp_mon_filter_setup_undecoded_metadata_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to set Undecoded capture filters"));
		dp_mon_filter_reset_undecoded_metadata_mode(pdev);
		return status;
	}

	return status;
}
#else
static inline QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */
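
/**
 * dp_reset_monitor_mode() - disable monitor mode and reset monitor filters
 * @soc_hdl: Datapath SOC handle
 * @pdev_id: id of pdev handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */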
QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id,
				 uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	qdf_spin_lock_bh(&mon_pdev->mon_lock);

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_DISABLE);
	mon_pdev->mvdev = NULL;

	/*
	 * Lite monitor mode, smart monitor mode and monitor
	 * mode all use this API for filter reset and mode disable.
	 */
	if (mon_pdev->mcopy_mode) {
#if defined(QCA_MCOPY_SUPPORT)
		dp_pdev_disable_mcopy_code(pdev);
		dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* QCA_MCOPY_SUPPORT */
	} else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
		dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
		/* for mon 2.0 we make use of lite mon to
		 * set filters for the smart monitor use case.
		 */
		dp_monitor_lite_mon_disable_rx(pdev);
	} else if (mon_pdev->undecoded_metadata_capture) {
#ifdef QCA_UNDECODED_METADATA_SUPPORT
		dp_reset_undecoded_metadata_capture(pdev);
#endif
	} else {
		dp_mon_filter_reset_mon_mode(pdev);
	}

	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
				   soc);
	}

	mon_pdev->monitor_configured = false;

	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
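/**
 * dp_pdev_set_advance_monitor_filter() - Set advanced monitor mode filter
 * @soc_hdl: Datapath SOC handle
 * @pdev_id: id of pdev handle
 * @filter_val: Flag to select filter for monitor mode
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */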
QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				   struct cdp_monitor_filter *filter_val)
{
	/* Many monitor VAPs can exist in a system, but only one can be up at
	 * any time.
	 */
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	vdev = mon_pdev->mvdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
		  pdev, pdev_id, soc, vdev);

	/* Check if current pdev's monitor_vdev exists */
	if (!mon_pdev->mvdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "vdev=%pK", vdev);
		qdf_assert(vdev);
	}

	/* update filter mode, type in pdev structure */
	mon_pdev->mon_filter_mode = filter_val->mode;
	mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
	mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
	mon_pdev->fp_data_filter = filter_val->fp_data;
	mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
	mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
	mon_pdev->mo_data_filter = filter_val->mo_data;

	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
				   soc);
		dp_mon_filter_reset_mon_mode(pdev);
	}

	return status;
}
#endif
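
/**
 * dp_deliver_tx_mgmt() - Deliver a Tx management frame to the monitor path
 * @cdp_soc: Datapath SOC handle
 * @pdev_id: id of pdev handle
 * @nbuf: Management frame buffer
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */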
QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	dp_deliver_mgmt_frm(pdev, nbuf);

	return QDF_STATUS_SUCCESS;
}
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: 0 on success, not 0 on failure
 */
static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	mon_vdev->scan_spcl_vap_stats =
		qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));

	if (!mon_vdev->scan_spcl_vap_stats) {
		dp_mon_err("scan spcl vap stats attach fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: void
 */
static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
	if (mon_vdev->scan_spcl_vap_stats) {
		qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
		mon_vdev->scan_spcl_vap_stats = NULL;
	}
}

/**
 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
 * @vdev: Datapath VDEV handle
 *
 * Return: void
 */
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_mon_pdev *mon_pdev;

	mon_pdev = vdev->pdev->monitor_pdev;
	if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
		return;

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
		return;

	qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));
}

/**
 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @stats: structure to hold spcl vap stats
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   struct cdp_scan_spcl_vap_stats *stats)
{
	struct dp_mon_vdev *mon_vdev = NULL;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev || !stats) {
		if (vdev)
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	mon_vdev = vdev->monitor_vdev;
	if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_INVAL;
	}

	qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
		     sizeof(struct cdp_scan_spcl_vap_stats));

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#else
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
}

static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
}
#endif
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @dp_soc: Datapath SOC handle
 * @vdev_id: id of Datapath VDEV handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
					   uint8_t vdev_id,
					   uint8_t special_monitor)
{
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;
	struct cdp_mon_ops *cdp_ops;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (!pdev || !pdev->monitor_pdev) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	mon_pdev = pdev->monitor_pdev;

	mon_pdev->mvdev = vdev;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
		  pdev, pdev->pdev_id, pdev->soc, vdev);

	/*
	 * Do not configure the monitor buf ring and filter for smart and
	 * lite monitor modes: for smart monitor, filters are added along
	 * with the first NAC; for lite monitor, the required configuration
	 * is done through dp_set_pdev_param.
	 */
	if (special_monitor) {
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}

	if (mon_pdev->scan_spcl_vap_configured)
		dp_reset_scan_spcl_vap_stats(vdev);

	/* Check if current pdev's monitor_vdev exists */
	if (mon_pdev->monitor_configured) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "monitor vap already created vdev=%pK\n", vdev);
		status = QDF_STATUS_E_RESOURCES;
		goto fail;
	}

	mon_pdev->monitor_configured = true;

	/* If configured, disable lite mon: a monitor vap takes
	 * priority over lite mon when it is created. Lite mon
	 * can be configured again later.
	 */
	dp_monitor_lite_mon_disable_rx(pdev);

	cdp_ops = dp_mon_cdp_ops_get(soc);
	if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
		cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
						  DP_FULL_MON_ENABLE);
	dp_mon_filter_setup_mon_mode(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to reset monitor filters", soc);
		dp_mon_filter_reset_mon_mode(pdev);
		mon_pdev->monitor_configured = false;
		mon_pdev->mvdev = NULL;
	}

fail:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

#ifdef QCA_TX_CAPTURE_SUPPORT
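/**
 * dp_config_tx_capture_mode() - enable Tx sniffer (Tx capture) mode
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 */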
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->tx_sniffer_enable = 1;
	mon_pdev->monitor_configured = false;

	if (!mon_pdev->pktlog_ppdu_stats)
		dp_h2t_cfg_stats_msg_send(pdev,
					  DP_PPDU_STATS_CFG_SNIFFER,
					  pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
#ifdef QCA_MCOPY_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
	return QDF_STATUS_E_INVAL;
}
#endif
#endif

#if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
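/**
 * dp_config_debug_sniffer() - configure the debug sniffer mode
 * @pdev: Datapath PDEV handle
 * @val: 0 - disable, 1 - Tx capture, 2/4 - mcopy mode
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */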
QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/*
	 * Note: The mirror copy mode cannot co-exist with any other
	 * monitor modes. Hence disabling the filter for this mode will
	 * reset the monitor destination ring filters.
	 */
	dp_reset_mcopy_mode(pdev);
	switch (val) {
	case 0:
		mon_pdev->tx_sniffer_enable = 0;
		mon_pdev->monitor_configured = false;

		/*
		 * We don't need to reset the Rx monitor status ring or call
		 * the API dp_ppdu_ring_reset() when all debug sniffer modes
		 * are disabled. The Rx monitor status ring will be disabled
		 * when the last mode using the monitor status ring gets
		 * disabled.
		 */
		if (!mon_pdev->pktlog_ppdu_stats &&
		    !mon_pdev->enhanced_stats_en &&
		    !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
		} else if (mon_pdev->enhanced_stats_en &&
			   !mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_ENH_STATS,
						  pdev->pdev_id);
		} else if (!mon_pdev->enhanced_stats_en &&
			   mon_pdev->bpr_enable) {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR_ENH,
						  pdev->pdev_id);
		} else {
			dp_h2t_cfg_stats_msg_send(pdev,
						  DP_PPDU_STATS_CFG_BPR,
						  pdev->pdev_id);
		}
		break;
	case 1:
		status = dp_config_tx_capture_mode(pdev);
		break;
	case 2:
	case 4:
		status = dp_config_mcopy_mode(pdev, val);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid value, mode not supported");
		status = QDF_STATUS_E_INVAL;
		break;
	}
	return status;
}
#endif

#ifdef QCA_UNDECODED_METADATA_SUPPORT
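/**
 * dp_mon_config_undecoded_metadata_capture() - enable or disable undecoded
 * metadata capture
 * @pdev: Datapath PDEV handle
 * @val: 0 - disable, non-zero - enable with the given configuration
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */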
QDF_STATUS
dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) {
		qdf_err("No monitor or Special vap, undecoded capture not supported");
		return QDF_STATUS_E_RESOURCES;
	}

	if (val)
		status = dp_enable_undecoded_metadata_capture(pdev, val);
	else
		status = dp_reset_undecoded_metadata_capture(pdev);

	return status;
}
#endif
/**
 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
 * ring based on target
 * @soc: soc handle
 * @mac_for_pdev: WIN- pdev_id, MCL- mac id
 * @pdev: physical device handle
 * @ring_num: mac id
 * @htt_tlv_filter: tlv filter
 *
 * Return: zero on success, non-zero on failure
 */
static inline QDF_STATUS
dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
			    struct dp_pdev *pdev, uint8_t ring_num,
			    struct htt_rx_ring_tlv_filter htt_tlv_filter)
{
	QDF_STATUS status;

	if (soc->wlan_cfg_ctx->rxdma1_enable)
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     soc->rxdma_mon_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_MONITOR_BUF,
					     RX_MONITOR_BUFFER_SIZE,
					     &htt_tlv_filter);
	else
		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
					     pdev->rx_mac_buf_ring[ring_num]
					     .hal_srng,
					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
					     &htt_tlv_filter);

	return status;
}
/**
 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 *
 * Return: virtual interface id
 */
static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
			 !pdev->monitor_pdev->mvdev))
		return -EINVAL;

	return pdev->monitor_pdev->mvdev->vdev_id;
}

#if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
#ifndef WLAN_TX_PKT_CAPTURE_ENH
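/**
 * dp_deliver_mgmt_frm() - Deliver a management frame to WDI subscribers when
 * Tx sniffer or mcopy mode is enabled, otherwise free it (unless BPR is
 * enabled)
 * @pdev: Datapath PDEV handle
 * @nbuf: Management frame buffer
 *
 * Return: void
 */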
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
		dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
				     nbuf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	} else {
		if (!mon_pdev->bpr_enable)
			qdf_nbuf_free(nbuf);
	}
}
#endif
#endif
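
/**
 * dp_htt_ppdu_stats_attach() - allocate the HTT PPDU stats TLV buffer
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */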
QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
	if (!mon_pdev->ppdu_tlv_buf) {
		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}
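
/**
 * dp_htt_ppdu_stats_detach() - free the pending PPDU info list entries and
 * the HTT PPDU stats TLV buffer
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */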
void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
	struct ppdu_info *ppdu_info, *ppdu_info_next;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (!ppdu_info)
			break;
		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->sched_comp_list_depth--;
		qdf_assert_always(ppdu_info->nbuf);
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
	}

	if (mon_pdev->ppdu_tlv_buf)
		qdf_mem_free(mon_pdev->ppdu_tlv_buf);
}
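
/**
 * dp_pdev_get_rx_mon_stats() - copy the pdev Rx monitor stats to the caller
 * @soc_hdl: Datapath SOC handle
 * @pdev_id: id of pdev handle
 * @stats: structure to hold the Rx monitor stats
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */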
QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    struct cdp_pdev_mon_stats *stats)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_copy(stats, &mon_pdev->rx_mon_stats,
		     sizeof(struct cdp_pdev_mon_stats));

	return QDF_STATUS_SUCCESS;
}

#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured
 * monitor pdev stats
 * @mon_pdev: Monitor PDEV handle
 * @rx_mon_stats: Monitor pdev status/destination ring stats
 *
 * Return: None
 */
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
	char undecoded_error[DP_UNDECODED_ERR_LENGTH];
	uint8_t index = 0, i;

	DP_PRINT_STATS("Rx Undecoded Frame count:%d",
		       rx_mon_stats->rx_undecoded_count);
	index = 0;
	for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) {
		index += qdf_snprint(&undecoded_error[index],
				     DP_UNDECODED_ERR_LENGTH - index,
				     " %d", rx_mon_stats->rx_undecoded_error[i]);
	}
	DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error);
}
#else
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
				    struct cdp_pdev_mon_stats *rx_mon_stats)
{
}
#endif
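
/**
 * dp_print_pdev_rx_mon_stats() - print the pdev Rx monitor ring stats and
 * the PPDU id history
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */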
void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	uint32_t *stat_ring_ppdu_ids;
	uint32_t *dest_ring_ppdu_ids;
	int i, idx;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	rx_mon_stats = &mon_pdev->rx_mon_stats;

	DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");

	DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
		       rx_mon_stats->status_ppdu_compl);
	DP_PRINT_STATS("status_ppdu_start_cnt = %d",
		       rx_mon_stats->status_ppdu_start);
	DP_PRINT_STATS("status_ppdu_end_cnt = %d",
		       rx_mon_stats->status_ppdu_end);
	DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_start_mis);
	DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
		       rx_mon_stats->status_ppdu_end_mis);
	DP_PRINT_STATS("status_ppdu_done_cnt = %d",
		       rx_mon_stats->status_ppdu_done);
	DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
		       rx_mon_stats->dest_ppdu_done);
	DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
		       rx_mon_stats->dest_mpdu_done);
	DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
		       rx_mon_stats->tlv_tag_status_err);
	DP_PRINT_STATS("mon status DMA not done WAR count= %u",
		       rx_mon_stats->status_buf_done_war);
	DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
		       rx_mon_stats->dest_mpdu_drop);
	DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
		       rx_mon_stats->dup_mon_linkdesc_cnt);
	DP_PRINT_STATS("dup_mon_buf_cnt = %d",
		       rx_mon_stats->dup_mon_buf_cnt);
	DP_PRINT_STATS("mon_rx_buf_reaped = %u",
		       rx_mon_stats->mon_rx_bufs_reaped_dest);
	DP_PRINT_STATS("mon_rx_buf_replenished = %u",
		       rx_mon_stats->mon_rx_bufs_replenished_dest);
	DP_PRINT_STATS("ppdu_id_mismatch = %u",
		       rx_mon_stats->ppdu_id_mismatch);
	DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
		       rx_mon_stats->ppdu_id_match);
	DP_PRINT_STATS("ppdus dropped frm status ring = %d",
		       rx_mon_stats->status_ppdu_drop);
	DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
		       rx_mon_stats->dest_ppdu_drop);
	stat_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	dest_ring_ppdu_ids =
		(uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);

	if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) {
		DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");
		qdf_mem_free(stat_ring_ppdu_ids);
		qdf_mem_free(dest_ring_ppdu_ids);
		return;
	}

	qdf_spin_lock_bh(&mon_pdev->mon_lock);
	idx = rx_mon_stats->ppdu_id_hist_idx;
	qdf_mem_copy(stat_ring_ppdu_ids,
		     rx_mon_stats->stat_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_mem_copy(dest_ring_ppdu_ids,
		     rx_mon_stats->dest_ring_ppdu_id_hist,
		     sizeof(uint32_t) * MAX_PPDU_ID_HIST);
	qdf_spin_unlock_bh(&mon_pdev->mon_lock);

	DP_PRINT_STATS("PPDU Id history:");
	DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
	for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
		idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
		DP_PRINT_STATS("%*u\t%*u", 16,
			       stat_ring_ppdu_ids[idx], 16,
			       dest_ring_ppdu_ids[idx]);
	}
	qdf_mem_free(stat_ring_ppdu_ids);
	qdf_mem_free(dest_ring_ppdu_ids);
	DP_PRINT_STATS("mon_rx_dest_stuck = %d",
		       rx_mon_stats->mon_rx_dest_stuck);

	dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats);
}

#ifdef QCA_SUPPORT_BPR
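/**
 * dp_set_bpr_enable() - enable or disable the BPR feature via the monitor
 * ops hook
 * @pdev: Datapath PDEV handle
 * @val: enable/disable value
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the hook
 * is not registered
 */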
QDF_STATUS
dp_set_bpr_enable(struct dp_pdev *pdev, int val)
{
	struct dp_mon_ops *mon_ops;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (mon_ops && mon_ops->mon_set_bpr_enable)
		return mon_ops->mon_set_bpr_enable(pdev, val);

	return QDF_STATUS_E_FAILURE;
}
#endif

#ifdef WDI_EVENT_ENABLE
#ifdef BE_PKTLOG_SUPPORT
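/**
 * dp_set_hybrid_pktlog_enable() - enable hybrid Tx pktlog mode
 * @pdev: Datapath PDEV handle
 * @mon_pdev: Monitor PDEV handle
 * @mon_soc: Monitor SOC handle
 *
 * Return: true if the hybrid pktlog filters were applied or already set,
 * false if monitor mode is enabled or the filter update failed
 */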
static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_mon_soc *mon_soc)
{
	if (mon_pdev->mvdev) {
		/* Nothing needs to be done if monitor mode is
		 * enabled
		 */
		mon_pdev->pktlog_hybrid_mode = true;
		return false;
	}

	if (!mon_pdev->pktlog_hybrid_mode) {
		mon_pdev->pktlog_hybrid_mode = true;
		dp_mon_filter_setup_pktlog_hybrid(pdev);
		if (dp_mon_filter_update(pdev) !=
		    QDF_STATUS_SUCCESS) {
			dp_cdp_err("Set hybrid filters failed");
			dp_mon_filter_reset_pktlog_hybrid(pdev);
			mon_pdev->rx_pktlog_mode =
				DP_RX_PKTLOG_DISABLED;
			return false;
		}

		if (mon_soc->reap_timer_init &&
		    !dp_mon_is_enable_reap_timer_non_pkt(pdev))
			qdf_timer_mod(&mon_soc->mon_reap_timer,
				      DP_INTR_POLL_TIMER_MS);
	}

	return true;
}

static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
	mon_pdev->pktlog_hybrid_mode = false;
}
#else
static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
}

static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
			    struct dp_mon_pdev *mon_pdev,
			    struct dp_mon_soc *mon_soc)
{
	dp_cdp_err("Hybrid mode is supported only on beryllium");
	return true;
}
#endif
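
/**
 * dp_set_pktlog_wifi3() - attach/detach the pktlog filters and stats config
 * for the given WDI event
 * @pdev: Datapath PDEV handle
 * @event: WDI event type
 * @enable: true - enable, false - disable
 *
 * Return: 0 on success
 */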
  848. int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
  849. bool enable)
  850. {
  851. struct dp_soc *soc = NULL;
  852. int max_mac_rings = wlan_cfg_get_num_mac_rings
  853. (pdev->wlan_cfg_ctx);
  854. uint8_t mac_id = 0;
  855. struct dp_mon_soc *mon_soc;
  856. struct dp_mon_ops *mon_ops;
  857. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  858. soc = pdev->soc;
  859. mon_soc = soc->monitor_soc;
  860. mon_ops = dp_mon_ops_get(soc);
  861. if (!mon_ops)
  862. return 0;
  863. dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
  864. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  865. FL("Max_mac_rings %d "),
  866. max_mac_rings);
  867. if (enable) {
  868. switch (event) {
  869. case WDI_EVENT_RX_DESC:
  870. if (mon_pdev->mvdev) {
  871. /* Nothing needs to be done if monitor mode is
  872. * enabled
  873. */
  874. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
  875. return 0;
  876. }
  877. if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
  878. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
  879. dp_mon_filter_setup_rx_pkt_log_full(pdev);
  880. if (dp_mon_filter_update(pdev) !=
  881. QDF_STATUS_SUCCESS) {
  882. dp_cdp_err("%pK: Pktlog full filters set failed", soc);
  883. dp_mon_filter_reset_rx_pkt_log_full(pdev);
  884. mon_pdev->rx_pktlog_mode =
  885. DP_RX_PKTLOG_DISABLED;
  886. return 0;
  887. }
  888. if (mon_soc->reap_timer_init &&
  889. (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
  890. qdf_timer_mod(&mon_soc->mon_reap_timer,
  891. DP_INTR_POLL_TIMER_MS);
  892. }
  893. break;
  894. case WDI_EVENT_LITE_RX:
  895. if (mon_pdev->mvdev) {
  896. /* Nothing needs to be done if monitor mode is
  897. * enabled
  898. */
  899. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
  900. return 0;
  901. }
  902. if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
  903. mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
  904. /*
  905. * Set the packet log lite mode filter.
  906. */
  907. dp_mon_filter_setup_rx_pkt_log_lite(pdev);
  908. if (dp_mon_filter_update(pdev) !=
  909. QDF_STATUS_SUCCESS) {
  910. dp_cdp_err("%pK: Pktlog lite filters set failed", soc);
  911. dp_mon_filter_reset_rx_pkt_log_lite(pdev);
  912. mon_pdev->rx_pktlog_mode =
  913. DP_RX_PKTLOG_DISABLED;
  914. return 0;
  915. }
  916. if (mon_soc->reap_timer_init &&
  917. (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
  918. qdf_timer_mod(&mon_soc->mon_reap_timer,
  919. DP_INTR_POLL_TIMER_MS);
  920. }
  921. break;
  922. case WDI_EVENT_LITE_T2H:
  923. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  924. int mac_for_pdev = dp_get_mac_id_for_pdev(
  925. mac_id, pdev->pdev_id);
  926. mon_pdev->pktlog_ppdu_stats = true;
  927. dp_h2t_cfg_stats_msg_send(pdev,
  928. DP_PPDU_TXLITE_STATS_BITMASK_CFG,
  929. mac_for_pdev);
  930. }
  931. break;
  932. case WDI_EVENT_RX_CBF:
  933. if (mon_pdev->mvdev) {
  934. /* Nothing needs to be done if monitor mode is
  935. * enabled
  936. */
  937. dp_mon_info("Mon mode, CBF setting filters");
  938. mon_pdev->rx_pktlog_cbf = true;
  939. return 0;
  940. }
  941. if (!mon_pdev->rx_pktlog_cbf) {
  942. mon_pdev->rx_pktlog_cbf = true;
  943. mon_pdev->monitor_configured = true;
  944. if (mon_ops->mon_vdev_set_monitor_mode_buf_rings)
  945. mon_ops->mon_vdev_set_monitor_mode_buf_rings(pdev);
  946. /*
  947. * Set the packet log lite mode filter.
  948. */
  949. qdf_info("Non mon mode: Enable destination ring");
  950. dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
  951. if (dp_mon_filter_update(pdev) !=
  952. QDF_STATUS_SUCCESS) {
  953. dp_mon_err("Pktlog set CBF filters failed");
  954. dp_mon_filter_reset_rx_pktlog_cbf(pdev);
  955. mon_pdev->rx_pktlog_mode =
  956. DP_RX_PKTLOG_DISABLED;
  957. mon_pdev->monitor_configured = false;
  958. return 0;
  959. }
  960. if (mon_soc->reap_timer_init &&
  961. !dp_mon_is_enable_reap_timer_non_pkt(pdev))
  962. qdf_timer_mod(&mon_soc->mon_reap_timer,
  963. DP_INTR_POLL_TIMER_MS);
  964. }
  965. break;
  966. case WDI_EVENT_HYBRID_TX:
  967. if (!dp_set_hybrid_pktlog_enable(pdev,
  968. mon_pdev, mon_soc))
  969. return 0;
  970. break;
  971. default:
  972. /* Nothing needs to be done for other pktlog types */
  973. break;
  974. }
  975. } else {
  976. switch (event) {
  977. case WDI_EVENT_RX_DESC:
  978. case WDI_EVENT_LITE_RX:
  979. if (mon_pdev->mvdev) {
  980. /* Nothing needs to be done if monitor mode is
  981. * enabled
  982. */
  983. mon_pdev->rx_pktlog_mode =
  984. DP_RX_PKTLOG_DISABLED;
  985. return 0;
  986. }
  987. if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
  988. mon_pdev->rx_pktlog_mode =
  989. DP_RX_PKTLOG_DISABLED;
  990. dp_mon_filter_reset_rx_pkt_log_full(pdev);
  991. if (dp_mon_filter_update(pdev) !=
  992. QDF_STATUS_SUCCESS) {
  993. dp_cdp_err("%pK: Pktlog filters reset failed", soc);
  994. return 0;
  995. }
  996. dp_mon_filter_reset_rx_pkt_log_lite(pdev);
  997. if (dp_mon_filter_update(pdev) !=
  998. QDF_STATUS_SUCCESS) {
  999. dp_cdp_err("%pK: Pktlog filters reset failed", soc);
  1000. return 0;
  1001. }
  1002. if (mon_soc->reap_timer_init &&
  1003. (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
  1004. qdf_timer_stop(&mon_soc->mon_reap_timer);
  1005. }
  1006. break;
  1007. case WDI_EVENT_LITE_T2H:
		/*
		 * To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the FW,
		 * pass the value 0. Once these macros are defined in the
		 * htt header file, the proper macros will be used.
		 */
  1013. for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
  1014. int mac_for_pdev =
  1015. dp_get_mac_id_for_pdev(mac_id,
  1016. pdev->pdev_id);
  1017. mon_pdev->pktlog_ppdu_stats = false;
  1018. if (!mon_pdev->enhanced_stats_en &&
  1019. !mon_pdev->tx_sniffer_enable &&
  1020. !mon_pdev->mcopy_mode) {
  1021. dp_h2t_cfg_stats_msg_send(pdev, 0,
  1022. mac_for_pdev);
  1023. } else if (mon_pdev->tx_sniffer_enable ||
  1024. mon_pdev->mcopy_mode) {
  1025. dp_h2t_cfg_stats_msg_send(pdev,
  1026. DP_PPDU_STATS_CFG_SNIFFER,
  1027. mac_for_pdev);
  1028. } else if (mon_pdev->enhanced_stats_en) {
  1029. dp_h2t_cfg_stats_msg_send(pdev,
  1030. DP_PPDU_STATS_CFG_ENH_STATS,
  1031. mac_for_pdev);
  1032. }
  1033. }
  1034. break;
  1035. case WDI_EVENT_RX_CBF:
  1036. mon_pdev->rx_pktlog_cbf = false;
  1037. break;
  1038. case WDI_EVENT_HYBRID_TX:
  1039. dp_set_hybrid_pktlog_disable(mon_pdev);
  1040. break;
  1041. default:
  1042. /* Nothing needs to be done for other pktlog types */
  1043. break;
  1044. }
  1045. }
  1046. return 0;
  1047. }
  1048. #endif
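/*
 * A minimal sketch (disabled, illustrative only) of the rx pktlog state
 * machine implemented by the WDI enable/disable handler above. The helper
 * name below is hypothetical; it only models the transitions between
 * DP_RX_PKTLOG_DISABLED, DP_RX_PKTLOG_FULL and DP_RX_PKTLOG_LITE, after
 * which the real code programs the mon filters and the reap timer.
 */
#if 0
static int example_rx_pktlog_transition(uint8_t *mode, bool enable, bool lite)
{
	if (enable) {
		uint8_t target = lite ? DP_RX_PKTLOG_LITE : DP_RX_PKTLOG_FULL;

		if (*mode == target)
			return 0;	/* already in the requested mode */
		*mode = target;		/* then setup filters + arm reap timer */
	} else {
		if (*mode == DP_RX_PKTLOG_DISABLED)
			return 0;	/* nothing to undo */
		*mode = DP_RX_PKTLOG_DISABLED;	/* then reset both filters */
	}
	return 1;	/* caller must invoke dp_mon_filter_update() */
}
#endif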
  1049. /* MCL specific functions */
  1050. #if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
  1051. void dp_pktlogmod_exit(struct dp_pdev *pdev)
  1052. {
  1053. struct dp_soc *soc = pdev->soc;
  1054. struct hif_opaque_softc *scn = soc->hif_handle;
  1055. struct dp_mon_soc *mon_soc = soc->monitor_soc;
  1056. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1057. if (!scn) {
  1058. dp_mon_err("Invalid hif(scn) handle");
  1059. return;
  1060. }
  1061. /* stop mon_reap_timer if it has been started */
  1062. if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
  1063. mon_soc->reap_timer_init &&
  1064. (!dp_mon_is_enable_reap_timer_non_pkt(pdev)))
  1065. qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
  1066. pktlogmod_exit(scn);
  1067. mon_pdev->pkt_log_init = false;
  1068. }
  1069. #endif /*DP_CON_MON*/
  1070. #if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT)
  1071. QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
  1072. {
  1073. struct cdp_interface_peer_stats peer_stats_intf;
  1074. struct dp_mon_peer_stats *mon_peer_stats = NULL;
  1075. struct dp_peer *tgt_peer = NULL;
  1076. struct dp_txrx_peer *txrx_peer = NULL;
  1077. if (!peer || !peer->vdev || !peer->monitor_peer)
  1078. return QDF_STATUS_E_FAULT;
  1079. tgt_peer = dp_get_tgt_peer_from_peer(peer);
  1080. if (!tgt_peer)
  1081. return QDF_STATUS_E_FAULT;
  1082. txrx_peer = tgt_peer->txrx_peer;
  1083. if (!txrx_peer)
  1084. return QDF_STATUS_E_FAULT;
  1085. mon_peer_stats = &peer->monitor_peer->stats;
  1086. qdf_mem_zero(&peer_stats_intf, sizeof(peer_stats_intf));
  1087. if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
  1088. peer_stats_intf.rssi_changed = true;
  1089. if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
  1090. (mon_peer_stats->tx.tx_rate &&
  1091. mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) {
  1092. qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
  1093. QDF_MAC_ADDR_SIZE);
  1094. peer_stats_intf.vdev_id = peer->vdev->vdev_id;
  1095. peer_stats_intf.last_peer_tx_rate =
  1096. mon_peer_stats->tx.last_tx_rate;
  1097. peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
  1098. peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
  1099. peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
  1100. peer_stats_intf.rx_packet_count = txrx_peer->to_stack.num;
  1101. peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
  1102. peer_stats_intf.tx_packet_count =
  1103. txrx_peer->stats.per_pkt_stats.tx.ucast.num;
  1104. peer_stats_intf.tx_byte_count =
  1105. txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
  1106. peer_stats_intf.per = tgt_peer->stats.tx.last_per;
  1107. peer_stats_intf.free_buff = INVALID_FREE_BUFF;
  1108. dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
  1109. (void *)&peer_stats_intf, 0,
  1110. WDI_NO_VAL, dp_pdev->pdev_id);
  1111. }
  1112. return QDF_STATUS_SUCCESS;
  1113. }
  1114. #endif
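/*
 * Illustrative sketch (disabled): a hypothetical consumer of the
 * WDI_EVENT_PEER_STATS event raised by dp_peer_stats_notify() above. The
 * event fires only when the SNR or the tx rate changed; the payload is the
 * cdp_interface_peer_stats snapshot built there. The callback signature is
 * an assumption for illustration, not the exact wdi_event subscriber API.
 */
#if 0
static void example_peer_stats_subscriber(void *ctx, void *data)
{
	struct cdp_interface_peer_stats *stats =
			(struct cdp_interface_peer_stats *)data;

	if (stats->rssi_changed)
		qdf_info("peer " QDF_MAC_ADDR_FMT " rssi %u",
			 QDF_MAC_ADDR_REF(stats->peer_mac),
			 stats->peer_rssi);
	qdf_info("tx rate %u kbps (last %u)",
		 stats->peer_tx_rate, stats->last_peer_tx_rate);
}
#endif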
  1115. #ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter() - filter frames from non-associated (NAC) clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet header
 *
 * Return: monitor vdev on a NAC match, NULL otherwise
 */
  1124. static
  1125. struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
  1126. uint8_t *rx_pkt_hdr)
  1127. {
  1128. struct ieee80211_frame *wh;
  1129. struct dp_neighbour_peer *peer = NULL;
  1130. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1131. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1132. if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
  1133. return NULL;
  1134. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1135. TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
  1136. neighbour_peer_list_elem) {
  1137. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  1138. wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			dp_rx_debug("%pK: NAC configuration matched for mac-%02x:%02x:%02x:%02x:%02x:%02x",
  1140. pdev->soc,
  1141. peer->neighbour_peers_macaddr.raw[0],
  1142. peer->neighbour_peers_macaddr.raw[1],
  1143. peer->neighbour_peers_macaddr.raw[2],
  1144. peer->neighbour_peers_macaddr.raw[3],
  1145. peer->neighbour_peers_macaddr.raw[4],
  1146. peer->neighbour_peers_macaddr.raw[5]);
  1147. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1148. return mon_pdev->mvdev;
  1149. }
  1150. }
  1151. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1152. return NULL;
  1153. }
  1154. QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
  1155. uint8_t *rx_pkt_hdr)
  1156. {
  1157. struct dp_vdev *vdev = NULL;
  1158. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1159. if (mon_pdev->filter_neighbour_peers) {
		/* Next Hop scenario is not yet handled */
  1161. vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
  1162. if (vdev) {
  1163. dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
  1164. pdev->invalid_peer_head_msdu,
  1165. pdev->invalid_peer_tail_msdu);
  1166. pdev->invalid_peer_head_msdu = NULL;
  1167. pdev->invalid_peer_tail_msdu = NULL;
  1168. return QDF_STATUS_SUCCESS;
  1169. }
  1170. }
  1171. return QDF_STATUS_E_FAILURE;
  1172. }
  1173. #endif
  1174. #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
/*
 * dp_update_filter_neighbour_peers() - set neighbour peer (NAC client)
 * address for smart mesh filtering
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: Add/Del command
 * @macaddr: NAC client mac address
 *
 * Return: 1 on success, 0 on failure
 */
  1185. static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
  1186. uint8_t vdev_id,
  1187. uint32_t cmd, uint8_t *macaddr)
  1188. {
  1189. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  1190. struct dp_pdev *pdev;
  1191. struct dp_neighbour_peer *peer = NULL;
  1192. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  1193. DP_MOD_ID_CDP);
  1194. struct dp_mon_pdev *mon_pdev;
  1195. if (!vdev || !macaddr)
  1196. goto fail0;
  1197. pdev = vdev->pdev;
  1198. if (!pdev)
  1199. goto fail0;
  1200. mon_pdev = pdev->monitor_pdev;
  1201. /* Store address of NAC (neighbour peer) which will be checked
  1202. * against TA of received packets.
  1203. */
  1204. if (cmd == DP_NAC_PARAM_ADD) {
  1205. peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
  1206. sizeof(*peer));
  1207. if (!peer) {
  1208. dp_cdp_err("%pK: DP neighbour peer node memory allocation failed"
  1209. , soc);
  1210. goto fail0;
  1211. }
  1212. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  1213. macaddr, QDF_MAC_ADDR_SIZE);
  1214. peer->vdev = vdev;
  1215. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1216. /* add this neighbour peer into the list */
  1217. TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer,
  1218. neighbour_peer_list_elem);
  1219. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1220. /* first neighbour */
  1221. if (!mon_pdev->neighbour_peers_added) {
  1222. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1223. mon_pdev->neighbour_peers_added = true;
  1224. dp_mon_filter_setup_smart_monitor(pdev);
  1225. status = dp_mon_filter_update(pdev);
  1226. if (status != QDF_STATUS_SUCCESS) {
  1227. dp_cdp_err("%pK: smart mon filter setup failed",
  1228. soc);
  1229. dp_mon_filter_reset_smart_monitor(pdev);
  1230. mon_pdev->neighbour_peers_added = false;
  1231. }
  1232. }
  1233. } else if (cmd == DP_NAC_PARAM_DEL) {
  1234. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1235. TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
  1236. neighbour_peer_list_elem) {
  1237. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  1238. macaddr, QDF_MAC_ADDR_SIZE)) {
  1239. /* delete this peer from the list */
  1240. TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
  1241. peer, neighbour_peer_list_elem);
  1242. qdf_mem_free(peer);
  1243. break;
  1244. }
  1245. }
  1246. /* last neighbour deleted */
  1247. if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) {
  1248. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1249. dp_mon_filter_reset_smart_monitor(pdev);
  1250. status = dp_mon_filter_update(pdev);
  1251. if (status != QDF_STATUS_SUCCESS) {
  1252. dp_cdp_err("%pK: smart mon filter clear failed",
  1253. soc);
  1254. }
  1255. mon_pdev->neighbour_peers_added = false;
  1256. }
  1257. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1258. }
  1259. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1260. return 1;
  1261. fail0:
  1262. if (vdev)
  1263. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1264. return 0;
  1265. }
  1266. #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
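/*
 * Illustrative sketch (disabled): driving the NAC add/del flow above from
 * a hypothetical control-path helper. The first DP_NAC_PARAM_ADD also arms
 * the smart-monitor filter; deleting the last neighbour tears it down.
 */
#if 0
static void example_nac_add_del(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint8_t *client_mac)
{
	/* dp_update_filter_neighbour_peers() returns 1 on success */
	if (!dp_update_filter_neighbour_peers(soc_hdl, vdev_id,
					      DP_NAC_PARAM_ADD, client_mac))
		return;
	/* ... capture frames from the NAC client ... */
	dp_update_filter_neighbour_peers(soc_hdl, vdev_id,
					 DP_NAC_PARAM_DEL, client_mac);
}
#endif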
  1267. #ifdef ATH_SUPPORT_NAC_RSSI
  1268. /**
  1269. * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
  1270. * @soc_hdl: DP soc handle
  1271. * @vdev_id: id of DP vdev handle
  1272. * @mac_addr: neighbour mac
  1273. * @rssi: rssi value
  1274. *
 * Return: QDF_STATUS_SUCCESS if the neighbour is found, error status otherwise
  1276. */
  1277. static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
  1278. uint8_t vdev_id,
  1279. char *mac_addr,
  1280. uint8_t *rssi)
  1281. {
  1282. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1283. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  1284. DP_MOD_ID_CDP);
  1285. struct dp_pdev *pdev;
  1286. struct dp_neighbour_peer *peer = NULL;
  1287. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  1288. struct dp_mon_pdev *mon_pdev;
  1289. if (!vdev)
  1290. return status;
  1291. pdev = vdev->pdev;
  1292. mon_pdev = pdev->monitor_pdev;
  1293. *rssi = 0;
  1294. qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
  1295. TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
  1296. neighbour_peer_list_elem) {
  1297. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  1298. mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
  1299. *rssi = peer->rssi;
  1300. status = QDF_STATUS_SUCCESS;
  1301. break;
  1302. }
  1303. }
  1304. qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
  1305. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1306. return status;
  1307. }
  1308. static QDF_STATUS
  1309. dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
  1310. uint8_t vdev_id,
  1311. enum cdp_nac_param_cmd cmd, char *bssid,
  1312. char *client_macaddr,
  1313. uint8_t chan_num)
  1314. {
  1315. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  1316. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  1317. DP_MOD_ID_CDP);
  1318. struct dp_pdev *pdev;
  1319. struct dp_mon_pdev *mon_pdev;
  1320. if (!vdev)
  1321. return QDF_STATUS_E_FAILURE;
  1322. pdev = (struct dp_pdev *)vdev->pdev;
  1323. mon_pdev = pdev->monitor_pdev;
  1324. mon_pdev->nac_rssi_filtering = 1;
  1325. /* Store address of NAC (neighbour peer) which will be checked
  1326. * against TA of received packets.
  1327. */
  1328. if (cmd == CDP_NAC_PARAM_ADD) {
  1329. dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
  1330. DP_NAC_PARAM_ADD,
  1331. (uint8_t *)client_macaddr);
  1332. } else if (cmd == CDP_NAC_PARAM_DEL) {
  1333. dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
  1334. DP_NAC_PARAM_DEL,
  1335. (uint8_t *)client_macaddr);
  1336. }
  1337. if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
  1338. soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
  1339. (soc->ctrl_psoc, pdev->pdev_id,
  1340. vdev->vdev_id, cmd, bssid, client_macaddr);
  1341. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  1342. return QDF_STATUS_SUCCESS;
  1343. }
  1344. #endif
  1345. #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/*
 * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @enable: Enable/Disable CFR
 * @filter_val: Flag to select filter for monitor mode
 *
 * Return: none
 */
  1353. static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
  1354. uint8_t pdev_id,
  1355. bool enable,
  1356. struct cdp_monitor_filter *filter_val)
  1357. {
  1358. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1359. struct dp_pdev *pdev = NULL;
  1360. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  1361. int max_mac_rings;
  1362. uint8_t mac_id = 0;
  1363. struct dp_mon_pdev *mon_pdev;
  1364. pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1365. if (!pdev) {
  1366. dp_mon_err("pdev is NULL");
  1367. return;
  1368. }
  1369. mon_pdev = pdev->monitor_pdev;
  1370. if (mon_pdev->mvdev) {
  1371. dp_mon_info("No action is needed since mon mode is enabled\n");
  1372. return;
  1373. }
  1374. soc = pdev->soc;
  1375. pdev->cfr_rcc_mode = false;
  1376. max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  1377. dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
  1378. dp_mon_debug("Max_mac_rings %d", max_mac_rings);
  1379. dp_mon_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
  1380. if (enable) {
  1381. pdev->cfr_rcc_mode = true;
  1382. htt_tlv_filter.ppdu_start = 1;
  1383. htt_tlv_filter.ppdu_end = 1;
  1384. htt_tlv_filter.ppdu_end_user_stats = 1;
  1385. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  1386. htt_tlv_filter.ppdu_end_status_done = 1;
  1387. htt_tlv_filter.mpdu_start = 1;
  1388. htt_tlv_filter.offset_valid = false;
  1389. htt_tlv_filter.enable_fp =
  1390. (filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
  1391. htt_tlv_filter.enable_md = 0;
  1392. htt_tlv_filter.enable_mo =
  1393. (filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
  1394. htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
  1395. htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
  1396. htt_tlv_filter.fp_data_filter = filter_val->fp_data;
  1397. htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
  1398. htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
  1399. htt_tlv_filter.mo_data_filter = filter_val->mo_data;
  1400. }
  1401. for (mac_id = 0;
  1402. mac_id < soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev;
  1403. mac_id++) {
  1404. int mac_for_pdev =
  1405. dp_get_mac_id_for_pdev(mac_id,
  1406. pdev->pdev_id);
  1407. htt_h2t_rx_ring_cfg(soc->htt_handle,
  1408. mac_for_pdev,
  1409. soc->rxdma_mon_status_ring[mac_id]
  1410. .hal_srng,
  1411. RXDMA_MONITOR_STATUS,
  1412. RX_MON_STATUS_BUF_SIZE,
  1413. &htt_tlv_filter);
  1414. }
  1415. }
  1416. #endif
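/*
 * Illustrative sketch (disabled): a hypothetical caller enabling CFR
 * capture with filter-pass management frames only. FILTER_MGMT_ALL is
 * assumed here for illustration; the real mode/subtype encodings come
 * from the cdp_monitor_filter users in the control path.
 */
#if 0
static void example_cfr_enable(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct cdp_monitor_filter filter = {0};

	filter.mode = MON_FILTER_PASS;		/* sets enable_fp only */
	filter.fp_mgmt = FILTER_MGMT_ALL;	/* assumed subtype mask */
	dp_cfr_filter(soc_hdl, pdev_id, true, &filter);
}
#endif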
  1417. void
  1418. dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1419. bool enable)
  1420. {
  1421. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1422. struct dp_pdev *pdev = NULL;
  1423. struct dp_mon_soc *mon_soc = soc->monitor_soc;
  1424. struct dp_mon_pdev *mon_pdev;
  1425. pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1426. if (!pdev) {
  1427. dp_mon_err("pdev is NULL");
  1428. return;
  1429. }
  1430. mon_pdev = pdev->monitor_pdev;
  1431. mon_pdev->enable_reap_timer_non_pkt = enable;
  1432. if (mon_pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
  1433. dp_mon_debug("pktlog enabled %d", mon_pdev->rx_pktlog_mode);
  1434. return;
  1435. }
  1436. if (!mon_soc->reap_timer_init) {
  1437. dp_mon_err("reap timer not init");
  1438. return;
  1439. }
  1440. if (enable)
  1441. qdf_timer_mod(&mon_soc->mon_reap_timer,
  1442. DP_INTR_POLL_TIMER_MS);
  1443. else
  1444. qdf_timer_sync_cancel(&mon_soc->mon_reap_timer);
  1445. }
  1446. #if defined(DP_CON_MON)
  1447. #ifndef REMOVE_PKT_LOG
  1448. /**
  1449. * dp_pkt_log_init() - API to initialize packet log
  1450. * @soc_hdl: Datapath soc handle
  1451. * @pdev_id: id of data path pdev handle
  1452. * @scn: HIF context
  1453. *
  1454. * Return: none
  1455. */
  1456. void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
  1457. {
  1458. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1459. struct dp_pdev *handle =
  1460. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1461. struct dp_mon_pdev *mon_pdev;
  1462. if (!handle) {
  1463. dp_mon_err("pdev handle is NULL");
  1464. return;
  1465. }
  1466. mon_pdev = handle->monitor_pdev;
	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log already initialized", soc);
		return;
	}
  1471. pktlog_sethandle(&mon_pdev->pl_dev, scn);
  1472. pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
  1473. pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
  1474. if (pktlogmod_init(scn)) {
  1475. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1476. "%s: pktlogmod_init failed", __func__);
  1477. mon_pdev->pkt_log_init = false;
  1478. } else {
  1479. mon_pdev->pkt_log_init = true;
  1480. }
  1481. }
  1482. /**
  1483. * dp_pkt_log_con_service() - connect packet log service
  1484. * @soc_hdl: Datapath soc handle
  1485. * @pdev_id: id of data path pdev handle
  1486. * @scn: device context
  1487. *
  1488. * Return: none
  1489. */
  1490. static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
  1491. uint8_t pdev_id, void *scn)
  1492. {
  1493. dp_pkt_log_init(soc_hdl, pdev_id, scn);
  1494. pktlog_htc_attach();
  1495. }
  1496. /**
  1497. * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
  1498. * @soc_hdl: Datapath soc handle
  1499. * @pdev_id: id of data path pdev handle
  1500. *
  1501. * Return: none
  1502. */
  1503. static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1504. {
  1505. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1506. struct dp_pdev *pdev =
  1507. dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1508. if (!pdev) {
  1509. dp_err("pdev handle is NULL");
  1510. return;
  1511. }
  1512. dp_pktlogmod_exit(pdev);
  1513. }
  1514. #else
  1515. static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
  1516. uint8_t pdev_id, void *scn)
  1517. {
  1518. }
  1519. static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1520. {
  1521. }
  1522. #endif
  1523. #endif
  1524. void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  1525. {
  1526. struct dp_neighbour_peer *peer = NULL;
  1527. struct dp_neighbour_peer *temp_peer = NULL;
  1528. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1529. TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
  1530. neighbour_peer_list_elem, temp_peer) {
  1531. /* delete this peer from the list */
  1532. TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
  1533. peer, neighbour_peer_list_elem);
  1534. qdf_mem_free(peer);
  1535. }
  1536. qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
  1537. }
  1538. #ifdef QCA_ENHANCED_STATS_SUPPORT
  1539. /*
  1540. * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
  1541. * @pdev: Datapath pdev handle
  1542. *
  1543. * Return: void
  1544. */
  1545. static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
  1546. {
  1547. struct dp_soc *soc = pdev->soc;
  1548. struct dp_mon_ops *mon_ops = NULL;
  1549. mon_ops = dp_mon_ops_get(soc);
  1550. if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
  1551. mon_ops->mon_tx_enable_enhanced_stats(pdev);
  1552. }
/*
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
  1560. static QDF_STATUS
  1561. dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
  1562. {
  1563. struct dp_pdev *pdev = NULL;
  1564. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1565. struct dp_mon_pdev *mon_pdev;
  1566. pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1567. pdev_id);
  1568. if (!pdev)
  1569. return QDF_STATUS_E_FAILURE;
  1570. mon_pdev = pdev->monitor_pdev;
  1571. if (!mon_pdev)
  1572. return QDF_STATUS_E_FAILURE;
  1573. if (mon_pdev->enhanced_stats_en == 0)
  1574. dp_cal_client_timer_start(mon_pdev->cal_client_ctx);
  1575. mon_pdev->enhanced_stats_en = 1;
  1576. pdev->enhanced_stats_en = true;
  1577. dp_mon_filter_setup_enhanced_stats(pdev);
  1578. status = dp_mon_filter_update(pdev);
  1579. if (status != QDF_STATUS_SUCCESS) {
  1580. dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
  1581. dp_mon_filter_reset_enhanced_stats(pdev);
  1582. dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
  1583. mon_pdev->enhanced_stats_en = 0;
  1584. pdev->enhanced_stats_en = false;
  1585. return QDF_STATUS_E_FAILURE;
  1586. }
  1587. dp_mon_tx_enable_enhanced_stats(pdev);
  1588. return QDF_STATUS_SUCCESS;
  1589. }
  1590. /*
  1591. * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
  1592. * @pdev: Datapath pdev handle
  1593. *
  1594. * Return: void
  1595. */
  1596. static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
  1597. {
  1598. struct dp_soc *soc = pdev->soc;
  1599. struct dp_mon_ops *mon_ops = NULL;
  1600. mon_ops = dp_mon_ops_get(soc);
  1601. if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
  1602. mon_ops->mon_tx_disable_enhanced_stats(pdev);
  1603. }
/*
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
  1611. static QDF_STATUS
  1612. dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
  1613. {
  1614. struct dp_pdev *pdev =
  1615. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1616. pdev_id);
  1617. struct dp_mon_pdev *mon_pdev;
  1618. if (!pdev || !pdev->monitor_pdev)
  1619. return QDF_STATUS_E_FAILURE;
  1620. mon_pdev = pdev->monitor_pdev;
  1621. if (mon_pdev->enhanced_stats_en == 1)
  1622. dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
  1623. mon_pdev->enhanced_stats_en = 0;
  1624. pdev->enhanced_stats_en = false;
  1625. dp_mon_tx_disable_enhanced_stats(pdev);
  1626. dp_mon_filter_reset_enhanced_stats(pdev);
  1627. if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
  1628. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1629. FL("Failed to reset enhanced mode filters"));
  1630. }
  1631. return QDF_STATUS_SUCCESS;
  1632. }
  1633. #ifdef WDI_EVENT_ENABLE
  1634. QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
  1635. struct cdp_rx_stats_ppdu_user *ppdu_user)
  1636. {
  1637. struct cdp_interface_peer_qos_stats qos_stats_intf;
  1638. if (ppdu_user->peer_id == HTT_INVALID_PEER) {
  1639. dp_mon_warn("Invalid peer id");
  1640. return QDF_STATUS_E_FAILURE;
  1641. }
  1642. qdf_mem_zero(&qos_stats_intf, sizeof(qos_stats_intf));
  1643. qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
  1644. QDF_MAC_ADDR_SIZE);
  1645. qos_stats_intf.frame_control = ppdu_user->frame_control;
  1646. qos_stats_intf.frame_control_info_valid =
  1647. ppdu_user->frame_control_info_valid;
  1648. qos_stats_intf.qos_control = ppdu_user->qos_control;
  1649. qos_stats_intf.qos_control_info_valid =
  1650. ppdu_user->qos_control_info_valid;
  1651. qos_stats_intf.vdev_id = ppdu_user->vdev_id;
  1652. dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
  1653. (void *)&qos_stats_intf, 0,
  1654. WDI_NO_VAL, dp_pdev->pdev_id);
  1655. return QDF_STATUS_SUCCESS;
  1656. }
  1657. #else
  1658. static inline QDF_STATUS
  1659. dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
  1660. struct cdp_rx_stats_ppdu_user *ppdu_user)
  1661. {
  1662. return QDF_STATUS_SUCCESS;
  1663. }
  1664. #endif
  1665. #endif /* QCA_ENHANCED_STATS_SUPPORT */
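/*
 * Illustrative sketch (disabled): the enable/disable pair above is
 * symmetric - enable starts the cal client timer, installs the
 * enhanced-stats mon filter and pushes it to the target; disable undoes
 * each step. A hypothetical toggle wrapper:
 */
#if 0
static QDF_STATUS example_toggle_enhanced_stats(struct cdp_soc_t *soc,
						uint8_t pdev_id, bool on)
{
	return on ? dp_enable_enhanced_stats(soc, pdev_id) :
		    dp_disable_enhanced_stats(soc, pdev_id);
}
#endif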
  1666. /**
  1667. * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
  1668. * for pktlog
  1669. * @soc: cdp_soc handle
  1670. * @pdev_id: id of dp pdev handle
  1671. * @mac_addr: Peer mac address
  1672. * @enb_dsb: Enable or disable peer based filtering
  1673. *
  1674. * Return: QDF_STATUS
  1675. */
  1676. static int
  1677. dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
  1678. uint8_t *mac_addr, uint8_t enb_dsb)
  1679. {
  1680. struct dp_peer *peer;
  1681. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  1682. struct dp_pdev *pdev =
  1683. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1684. pdev_id);
  1685. struct dp_mon_pdev *mon_pdev;
  1686. if (!pdev)
  1687. return QDF_STATUS_E_FAILURE;
  1688. mon_pdev = pdev->monitor_pdev;
  1689. peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
  1690. 0, DP_VDEV_ALL, DP_MOD_ID_CDP);
  1691. if (!peer) {
  1692. dp_mon_err("Invalid Peer");
  1693. return QDF_STATUS_E_FAILURE;
  1694. }
  1695. if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) {
  1696. peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
  1697. mon_pdev->dp_peer_based_pktlog = enb_dsb;
  1698. status = QDF_STATUS_SUCCESS;
  1699. }
  1700. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  1701. return status;
  1702. }
  1703. /**
  1704. * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
  1705. * @soc: DP_SOC handle
  1706. * @pdev_id: id of DP_PDEV handle
  1707. * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
  1708. * @is_tx_pkt_cap_enable: enable/disable/delete/print
  1709. * Tx packet capture in monitor mode
  1710. * @peer_mac: MAC address for which the above need to be enabled/disabled
  1711. *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is configured for the peer,
 * error status otherwise
  1713. */
  1714. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
  1715. static QDF_STATUS
  1716. dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
  1717. uint8_t pdev_id,
  1718. bool is_rx_pkt_cap_enable,
  1719. uint8_t is_tx_pkt_cap_enable,
  1720. uint8_t *peer_mac)
  1721. {
  1722. struct dp_peer *peer;
  1723. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  1724. struct dp_pdev *pdev =
  1725. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  1726. pdev_id);
  1727. if (!pdev)
  1728. return QDF_STATUS_E_FAILURE;
  1729. peer = dp_peer_find_hash_find((struct dp_soc *)soc,
  1730. peer_mac, 0, DP_VDEV_ALL,
  1731. DP_MOD_ID_CDP);
  1732. if (!peer)
  1733. return QDF_STATUS_E_FAILURE;
	/* we need to set tx pkt capture for non-associated peer */
  1735. if (!IS_MLO_DP_MLD_PEER(peer)) {
  1736. status = dp_monitor_tx_peer_filter(pdev, peer,
  1737. is_tx_pkt_cap_enable,
  1738. peer_mac);
  1739. status = dp_peer_set_rx_capture_enabled(pdev, peer,
  1740. is_rx_pkt_cap_enable,
  1741. peer_mac);
  1742. }
  1743. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  1744. return status;
  1745. }
  1746. #endif
  1747. #ifdef QCA_MCOPY_SUPPORT
  1748. QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
  1749. uint16_t peer_id,
  1750. uint32_t ppdu_id,
  1751. uint8_t first_msdu)
  1752. {
  1753. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1754. if (mon_pdev->mcopy_mode) {
  1755. if (mon_pdev->mcopy_mode == M_COPY) {
  1756. if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
  1757. (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
  1758. return QDF_STATUS_E_INVAL;
  1759. }
  1760. }
  1761. if (!first_msdu)
  1762. return QDF_STATUS_E_INVAL;
  1763. mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
  1764. mon_pdev->m_copy_id.tx_peer_id = peer_id;
  1765. }
  1766. return QDF_STATUS_SUCCESS;
  1767. }
  1768. #endif
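/*
 * Illustrative sketch (disabled): expected verdicts from
 * dp_mcopy_check_deliver() when mcopy_mode == M_COPY. In that mode at
 * most one msdu is delivered per (ppdu_id, peer_id) pair, and only the
 * first msdu of a PPDU; the values below are hypothetical.
 */
#if 0
static void example_mcopy_dedup(struct dp_pdev *pdev)
{
	/* first msdu of ppdu 100 from peer 5: delivered */
	qdf_assert(dp_mcopy_check_deliver(pdev, 5, 100, 1) ==
		   QDF_STATUS_SUCCESS);
	/* same (ppdu, peer) seen again: suppressed */
	qdf_assert(dp_mcopy_check_deliver(pdev, 5, 100, 1) ==
		   QDF_STATUS_E_INVAL);
	/* non-first msdu of a new ppdu: suppressed */
	qdf_assert(dp_mcopy_check_deliver(pdev, 5, 101, 0) ==
		   QDF_STATUS_E_INVAL);
}
#endif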
  1769. #ifdef WDI_EVENT_ENABLE
  1770. #ifndef REMOVE_PKT_LOG
  1771. static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1772. {
  1773. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  1774. struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  1775. if (!pdev || !pdev->monitor_pdev)
  1776. return NULL;
  1777. return pdev->monitor_pdev->pl_dev;
  1778. }
  1779. #else
  1780. static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
  1781. {
  1782. return NULL;
  1783. }
  1784. #endif
  1785. #endif
  1786. QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
  1787. uint32_t mac_id,
  1788. uint32_t event,
  1789. qdf_nbuf_t mpdu,
  1790. uint32_t msdu_timestamp)
  1791. {
  1792. uint32_t data_size, hdr_size, ppdu_id, align4byte;
  1793. struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1794. uint32_t *msg_word;
  1795. if (!pdev)
  1796. return QDF_STATUS_E_INVAL;
  1797. ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;
  1798. hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
  1799. + qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);
  1800. data_size = qdf_nbuf_len(mpdu);
  1801. qdf_nbuf_push_head(mpdu, hdr_size);
  1802. msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
  1803. /*
  1804. * Populate the PPDU Stats Indication header
  1805. */
  1806. HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
  1807. HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
  1808. HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
  1809. align4byte = ((data_size +
  1810. qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
  1811. + 3) >> 2) << 2;
  1812. HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
  1813. msg_word++;
  1814. HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
  1815. msg_word++;
  1816. *msg_word = msdu_timestamp;
  1817. msg_word++;
  1818. /* Skip reserved field */
  1819. msg_word++;
  1820. /*
  1821. * Populate MGMT_CTRL Payload TLV first
  1822. */
  1823. HTT_STATS_TLV_TAG_SET(*msg_word,
  1824. HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);
  1825. align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
  1826. qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
  1827. + 3) >> 2) << 2;
  1828. HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);
  1829. msg_word++;
  1830. HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
  1831. *msg_word, data_size);
  1832. msg_word++;
  1833. dp_wdi_event_handler(event, soc, (void *)mpdu,
  1834. HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
  1835. qdf_nbuf_pull_head(mpdu, hdr_size);
  1836. return QDF_STATUS_SUCCESS;
  1837. }
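/*
 * dp_rx_populate_cbf_hdr() above rounds the payload size up to a 4-byte
 * boundary with ((x + 3) >> 2) << 2. A minimal sketch of that arithmetic
 * (disabled, illustrative only):
 */
#if 0
static inline uint32_t example_align4(uint32_t x)
{
	/* 0 -> 0, 1 -> 4, 4 -> 4, 5 -> 8 */
	return ((x + 3) >> 2) << 2;
}
#endif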
  1838. #ifdef ATH_SUPPORT_EXT_STAT
/*
 * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 * @arg: argument to the iterator function
 */
  1844. static void
  1845. dp_peer_cal_clients_stats_update(struct dp_soc *soc,
  1846. struct dp_peer *peer,
  1847. void *arg)
  1848. {
  1849. struct cdp_calibr_stats_intf peer_stats_intf = {0};
  1850. struct dp_peer *tgt_peer = NULL;
  1851. struct dp_txrx_peer *txrx_peer = NULL;
  1852. if (!dp_peer_is_primary_link_peer(peer))
  1853. return;
  1854. tgt_peer = dp_get_tgt_peer_from_peer(peer);
  1855. if (!tgt_peer || !(tgt_peer->txrx_peer))
  1856. return;
  1857. txrx_peer = tgt_peer->txrx_peer;
  1858. peer_stats_intf.to_stack = txrx_peer->to_stack;
  1859. peer_stats_intf.tx_success =
  1860. txrx_peer->stats.per_pkt_stats.tx.tx_success;
  1861. peer_stats_intf.tx_ucast =
  1862. txrx_peer->stats.per_pkt_stats.tx.ucast;
  1863. dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
  1864. &tgt_peer->stats);
  1865. }
/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 */
  1869. static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
  1870. {
  1871. struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
  1872. dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
  1873. DP_MOD_ID_CDP);
  1874. }
  1875. #else
  1876. static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
  1877. {
  1878. }
  1879. #endif
  1880. #ifdef ATH_SUPPORT_NAC
  1881. int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
  1882. bool val)
  1883. {
  1884. /* Enable/Disable smart mesh filtering. This flag will be checked
  1885. * during rx processing to check if packets are from NAC clients.
  1886. */
  1887. pdev->monitor_pdev->filter_neighbour_peers = val;
  1888. return 0;
  1889. }
  1890. #endif /* ATH_SUPPORT_NAC */
  1891. #ifdef WLAN_ATF_ENABLE
  1892. void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
  1893. {
  1894. if (!pdev) {
  1895. dp_cdp_err("Invalid pdev");
  1896. return;
  1897. }
  1898. pdev->monitor_pdev->dp_atf_stats_enable = value;
  1899. }
  1900. #endif
  1901. #ifdef QCA_ENHANCED_STATS_SUPPORT
/*
 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv() - Process
 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @ppdu_id: PPDU id of the descriptor
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf has to be freed by the caller
 */
  1911. QDF_STATUS
  1912. dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
  1913. qdf_nbuf_t tag_buf,
  1914. uint32_t ppdu_id)
  1915. {
  1916. uint32_t *nbuf_ptr;
  1917. uint8_t trim_size;
  1918. size_t head_size;
  1919. struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
  1920. uint32_t *msg_word;
  1921. uint32_t tsf_hdr;
  1922. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  1923. if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
  1924. (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
  1925. return QDF_STATUS_SUCCESS;
  1926. /*
  1927. * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
  1928. */
  1929. msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
  1930. msg_word = msg_word + 2;
  1931. tsf_hdr = *msg_word;
  1932. trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
  1933. HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
  1934. qdf_nbuf_data(tag_buf));
  1935. if (!qdf_nbuf_pull_head(tag_buf, trim_size))
  1936. return QDF_STATUS_SUCCESS;
  1937. qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
  1938. mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);
  1939. if (mon_pdev->tx_capture_enabled) {
  1940. head_size = sizeof(struct cdp_tx_mgmt_comp_info);
  1941. if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
			qdf_err("Failed to get headroom, h_sz %zu h_avail %d\n",
				head_size, qdf_nbuf_headroom(tag_buf));
  1944. qdf_assert_always(0);
  1945. return QDF_STATUS_E_NOMEM;
  1946. }
  1947. ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
  1948. qdf_nbuf_push_head(tag_buf, head_size);
  1949. qdf_assert_always(ptr_mgmt_comp_info);
  1950. ptr_mgmt_comp_info->ppdu_id = ppdu_id;
  1951. ptr_mgmt_comp_info->is_sgen_pkt = true;
  1952. ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
  1953. } else {
  1954. head_size = sizeof(ppdu_id);
  1955. nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
  1956. *nbuf_ptr = ppdu_id;
  1957. }
  1958. if (mon_pdev->bpr_enable) {
  1959. dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
  1960. tag_buf, HTT_INVALID_PEER,
  1961. WDI_NO_VAL, pdev->pdev_id);
  1962. }
  1963. dp_deliver_mgmt_frm(pdev, tag_buf);
  1964. return QDF_STATUS_E_ALREADY;
  1965. }
  1966. /*
  1967. * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
  1968. * bitmap for sniffer mode
  1969. * @bitmap: received bitmap
  1970. *
 * Return: expected bitmap value; returns zero if it matches neither the
 * 64-bit nor the 256-bit Tx window tlv bitmap
  1973. */
  1974. int
  1975. dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
  1976. {
  1977. if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
  1978. return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
  1979. else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
  1980. return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
  1981. return 0;
  1982. }
  1983. /*
  1984. * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
  1985. * @peer: Datapath peer handle
  1986. * @ppdu: User PPDU Descriptor
  1987. * @cur_ppdu_id: PPDU_ID
  1988. *
  1989. * Return: None
  1990. *
 * On a Tx data frame we may get the delayed-BA flag set in
 * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
 * only after we send a Block Ack Request (BAR). The successful msdu count
 * is known only after the Block Ack, so to populate peer stats with
 * successful msdus (data frames) we hold the Tx data stats in delayed_ba
 * until the stats update.
  1996. */
  1997. static void
  1998. dp_peer_copy_delay_stats(struct dp_peer *peer,
  1999. struct cdp_tx_completion_ppdu_user *ppdu,
  2000. uint32_t cur_ppdu_id)
  2001. {
  2002. struct dp_pdev *pdev;
  2003. struct dp_vdev *vdev;
  2004. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  2005. if (mon_peer->last_delayed_ba) {
  2006. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2007. "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
  2008. mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
  2009. vdev = peer->vdev;
  2010. if (vdev) {
  2011. pdev = vdev->pdev;
  2012. pdev->stats.cdp_delayed_ba_not_recev++;
  2013. }
  2014. }
  2015. mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
  2016. mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
  2017. mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
  2018. mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
  2019. mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
  2020. mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
  2021. mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
  2022. mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
  2023. mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
  2025. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
  2026. ppdu->mpdu_tried_ucast;
  2027. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
  2028. ppdu->mpdu_tried_mcast;
  2029. mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
  2030. mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
  2032. mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
  2033. mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
  2034. mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
  2035. mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
  2036. mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
  2037. mon_peer->last_delayed_ba = true;
  2038. ppdu->debug_copied = true;
  2039. }
  2040. /*
  2041. * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
  2042. * @peer: Datapath peer handle
  2043. * @ppdu: PPDU Descriptor
  2044. *
  2045. * Return: None
  2046. *
 * For Tx BAR frames, the PPDU stats TLV includes Block Ack info, and the
 * PPDU info from the Tx BAR frame is not required to populate peer stats.
 * But we do need the successful MPDU and MSDU counts to update the
 * previously transmitted Tx data frame, so overwrite the ppdu stats with
 * the previously stored ppdu stats.
  2052. */
  2053. static void
  2054. dp_peer_copy_stats_to_bar(struct dp_peer *peer,
  2055. struct cdp_tx_completion_ppdu_user *ppdu)
  2056. {
  2057. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  2058. ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
  2059. ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
  2060. ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
  2061. ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
  2062. ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
  2063. ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
  2064. ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
  2065. ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
  2066. ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
  2068. ppdu->mpdu_tried_ucast =
  2069. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
  2070. ppdu->mpdu_tried_mcast =
  2071. mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
  2072. ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
  2073. ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
  2075. ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
  2076. ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
  2077. ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
  2078. ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
  2079. ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;
  2080. mon_peer->last_delayed_ba = false;
  2081. ppdu->debug_copied = true;
  2082. }
  2083. /*
  2084. * dp_tx_rate_stats_update() - Update rate per-peer statistics
  2085. * @peer: Datapath peer handle
  2086. * @ppdu: PPDU Descriptor
  2087. *
  2088. * Return: None
  2089. */
  2090. static void
  2091. dp_tx_rate_stats_update(struct dp_peer *peer,
  2092. struct cdp_tx_completion_ppdu_user *ppdu)
  2093. {
  2094. uint32_t ratekbps = 0;
  2095. uint64_t ppdu_tx_rate = 0;
  2096. uint32_t rix;
  2097. uint16_t ratecode = 0;
  2098. struct dp_mon_peer *mon_peer = NULL;
  2099. if (!peer || !ppdu)
  2100. return;
  2101. if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
  2102. return;
  2103. mon_peer = peer->monitor_peer;
  2104. if (!mon_peer)
  2105. return;
  2106. ratekbps = dp_getrateindex(ppdu->gi,
  2107. ppdu->mcs,
  2108. ppdu->nss,
  2109. ppdu->preamble,
  2110. ppdu->bw,
  2111. ppdu->punc_mode,
  2112. &rix,
  2113. &ratecode);
  2114. if (!ratekbps)
  2115. return;
	/* Calculate goodput in the non-training period.
	 * In the training period, don't do anything, as the
	 * pending pkt is sent as goodput.
	 */
  2120. if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
  2121. ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
  2122. (CDP_PERCENT_MACRO - ppdu->current_rate_per));
  2123. }
  2124. ppdu->rix = rix;
  2125. ppdu->tx_ratekbps = ratekbps;
  2126. ppdu->tx_ratecode = ratecode;
  2127. DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps);
  2128. mon_peer->stats.tx.avg_tx_rate =
  2129. dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps);
  2130. ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate);
  2131. DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
  2132. mon_peer->stats.tx.bw_info = ppdu->bw;
  2133. mon_peer->stats.tx.gi_info = ppdu->gi;
  2134. mon_peer->stats.tx.nss_info = ppdu->nss;
  2135. mon_peer->stats.tx.mcs_info = ppdu->mcs;
  2136. mon_peer->stats.tx.preamble_info = ppdu->preamble;
  2137. if (peer->vdev) {
  2138. /*
  2139. * In STA mode:
  2140. * We get ucast stats as BSS peer stats.
  2141. *
  2142. * In AP mode:
  2143. * We get mcast stats as BSS peer stats.
  2144. * We get ucast stats as assoc peer stats.
  2145. */
  2146. if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
  2147. peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
  2148. peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
  2149. } else {
  2150. peer->vdev->stats.tx.last_tx_rate = ratekbps;
  2151. peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
  2152. }
  2153. }
  2154. }
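/*
 * Illustrative sketch (disabled): the goodput computed in
 * dp_tx_rate_stats_update() above scales the rate from kbps to Mbps
 * (CDP_NUM_KB_IN_MB) and multiplies by the success percentage implied by
 * the current PER (CDP_PERCENT_MACRO is the 100% reference).
 */
#if 0
static inline uint32_t example_goodput(uint32_t ratekbps, uint32_t per)
{
	/* e.g. 100000 kbps at 10% PER -> 100 * 90 = 9000 */
	return (ratekbps / CDP_NUM_KB_IN_MB) * (CDP_PERCENT_MACRO - per);
}
#endif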
  2155. #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
  2156. void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
  2157. uint16_t peer_id)
  2158. {
  2159. struct cdp_interface_peer_stats peer_stats_intf;
  2160. struct dp_mon_peer *mon_peer = peer->monitor_peer;
  2161. struct dp_txrx_peer *txrx_peer = NULL;
  2162. if (!mon_peer)
  2163. return;
  2164. qdf_mem_zero(&peer_stats_intf,
  2165. sizeof(struct cdp_interface_peer_stats));
  2166. mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
  2167. peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;
  2168. txrx_peer = dp_get_txrx_peer(peer);
  2169. if (txrx_peer) {
  2170. peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
  2171. peer_stats_intf.tx_byte_count =
  2172. txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
  2173. }
  2174. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
  2175. &peer_stats_intf, peer_id,
  2176. UPDATE_PEER_STATS, pdev->pdev_id);
  2177. }
  2178. #endif
  2179. /*
  2180. * dp_get_ru_index_frm_ru_tones() - get ru index
  2181. * @ru_tones: ru tones
  2182. *
  2183. * Return: ru index
  2184. */
  2185. #ifdef WLAN_FEATURE_11BE
  2186. static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
  2187. {
  2188. enum cdp_ru_index ru_index;
  2189. switch (ru_tones) {
  2190. case RU_26:
  2191. ru_index = RU_26_INDEX;
  2192. break;
  2193. case RU_52:
  2194. ru_index = RU_52_INDEX;
  2195. break;
  2196. case RU_52_26:
  2197. ru_index = RU_52_26_INDEX;
  2198. break;
  2199. case RU_106:
  2200. ru_index = RU_106_INDEX;
  2201. break;
  2202. case RU_106_26:
  2203. ru_index = RU_106_26_INDEX;
  2204. break;
  2205. case RU_242:
  2206. ru_index = RU_242_INDEX;
  2207. break;
  2208. case RU_484:
  2209. ru_index = RU_484_INDEX;
  2210. break;
  2211. case RU_484_242:
  2212. ru_index = RU_484_242_INDEX;
  2213. break;
  2214. case RU_996:
  2215. ru_index = RU_996_INDEX;
  2216. break;
  2217. case RU_996_484:
  2218. ru_index = RU_996_484_INDEX;
  2219. break;
  2220. case RU_996_484_242:
  2221. ru_index = RU_996_484_242_INDEX;
  2222. break;
  2223. case RU_2X996:
  2224. ru_index = RU_2X996_INDEX;
  2225. break;
  2226. case RU_2X996_484:
  2227. ru_index = RU_2X996_484_INDEX;
  2228. break;
  2229. case RU_3X996:
  2230. ru_index = RU_3X996_INDEX;
  2231. break;
  2232. case RU_3X996_484:
		ru_index = RU_3X996_484_INDEX;
  2234. break;
  2235. case RU_4X996:
  2236. ru_index = RU_4X996_INDEX;
  2237. break;
  2238. default:
  2239. ru_index = RU_INDEX_MAX;
  2240. break;
  2241. }
  2242. return ru_index;
  2243. }
  2244. #else
  2245. static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
  2246. {
  2247. enum cdp_ru_index ru_index;
  2248. switch (ru_tones) {
  2249. case RU_26:
  2250. ru_index = RU_26_INDEX;
  2251. break;
  2252. case RU_52:
  2253. ru_index = RU_52_INDEX;
  2254. break;
  2255. case RU_106:
  2256. ru_index = RU_106_INDEX;
  2257. break;
  2258. case RU_242:
  2259. ru_index = RU_242_INDEX;
  2260. break;
  2261. case RU_484:
  2262. ru_index = RU_484_INDEX;
  2263. break;
  2264. case RU_996:
  2265. ru_index = RU_996_INDEX;
  2266. break;
  2267. default:
  2268. ru_index = RU_INDEX_MAX;
  2269. break;
  2270. }
  2271. return ru_index;
  2272. }
  2273. #endif
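/*
 * Illustrative sketch (disabled): per-RU stats arrays are indexed only
 * when the tones map to a known RU, mirroring the RU_INDEX_MAX guard used
 * by dp_tx_stats_update() below. The histogram array is hypothetical and
 * assumed to hold RU_INDEX_MAX counters.
 */
#if 0
static void example_ru_histogram(uint32_t *hist, uint16_t ru_tones)
{
	enum cdp_ru_index idx = dp_get_ru_index_frm_ru_tones(ru_tones);

	if (idx != RU_INDEX_MAX)
		hist[idx]++;
}
#endif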
  2274. /*
  2275. * dp_tx_stats_update() - Update per-peer statistics
  2276. * @pdev: Datapath pdev handle
  2277. * @peer: Datapath peer handle
  2278. * @ppdu: PPDU Descriptor
  2279. * @ack_rssi: RSSI of last ack received
  2280. *
  2281. * Return: None
  2282. */
  2283. static void
  2284. dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
  2285. struct cdp_tx_completion_ppdu_user *ppdu,
  2286. uint32_t ack_rssi)
  2287. {
  2288. uint8_t preamble, mcs;
  2289. uint16_t num_msdu;
  2290. uint16_t num_mpdu;
  2291. uint16_t mpdu_tried;
  2292. uint16_t mpdu_failed;
  2293. struct dp_mon_ops *mon_ops;
  2294. enum cdp_ru_index ru_index;
  2295. struct dp_mon_peer *mon_peer = NULL;
  2296. uint32_t ratekbps = 0;
  2297. preamble = ppdu->preamble;
  2298. mcs = ppdu->mcs;
  2299. num_msdu = ppdu->num_msdu;
  2300. num_mpdu = ppdu->mpdu_success;
  2301. mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
  2302. mpdu_failed = mpdu_tried - num_mpdu;
  2303. /* If the peer statistics are already processed as part of
  2304. * per-MSDU completion handler, do not process these again in per-PPDU
  2305. * indications
  2306. */
  2307. if (pdev->soc->process_tx_status)
  2308. return;
  2309. mon_peer = peer->monitor_peer;
  2310. if (!mon_peer)
  2311. return;
  2312. if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
  2313. /*
  2314. * All failed mpdu will be retried, so incrementing
  2315. * retries mpdu based on mpdu failed. Even for
  2316. * ack failure i.e for long retries we get
  2317. * mpdu failed equal mpdu tried.
  2318. */
  2319. DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
  2320. return;
  2321. }
  2322. if (ppdu->is_ppdu_cookie_valid)
  2323. DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1);
  2324. if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
  2325. ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
  2326. if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
  2327. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2328. "mu_group_id out of bound!!\n");
  2329. else
  2330. DP_STATS_UPD(mon_peer, tx.mu_group_id[ppdu->mu_group_id],
  2331. (ppdu->user_pos + 1));
  2332. }
  2333. if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
  2334. ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
  2335. DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones);
  2336. DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start);
  2337. ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones);
  2338. if (ru_index != RU_INDEX_MAX) {
  2339. DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu,
  2340. num_msdu);
  2341. DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu,
  2342. num_mpdu);
  2343. DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried,
  2344. mpdu_tried);
  2345. }
  2346. }
  2347. /*
  2348. * All failed mpdu will be retried, so incrementing
  2349. * retries mpdu based on mpdu failed. Even for
  2350. * ack failure i.e for long retries we get
  2351. * mpdu failed equal mpdu tried.
  2352. */
  2353. DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
  2354. DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
  2355. num_msdu);
  2356. DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
  2357. num_mpdu);
  2358. DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
  2359. mpdu_tried);
  2360. DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu);
  2361. DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu);
  2362. DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu);
  2363. if (ppdu->tid < CDP_DATA_TID_MAX)
  2364. DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
  2365. num_msdu);
  2366. DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc);
  2367. DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc);
  2368. if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
  2369. DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ack_rssi);
  2370. DP_STATS_INCC(mon_peer,
  2371. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2372. ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
  2373. DP_STATS_INCC(mon_peer,
  2374. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2375. ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
  2376. DP_STATS_INCC(mon_peer,
  2377. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2378. ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
  2379. DP_STATS_INCC(mon_peer,
  2380. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2381. ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
  2382. DP_STATS_INCC(mon_peer,
  2383. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2384. ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
  2385. DP_STATS_INCC(mon_peer,
  2386. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2387. ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
  2388. DP_STATS_INCC(mon_peer,
  2389. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2390. ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
  2391. DP_STATS_INCC(mon_peer,
  2392. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2393. ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
  2394. DP_STATS_INCC(mon_peer,
  2395. tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
  2396. ((mcs >= MAX_MCS_11AX) && (preamble == DOT11_AX)));
  2397. DP_STATS_INCC(mon_peer,
  2398. tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
  2399. ((mcs < MAX_MCS_11AX) && (preamble == DOT11_AX)));
  2400. DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu);
  2401. DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu));
  2402. DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
  2403. DP_STATS_INC(mon_peer, tx.tx_ppdus, 1);
  2404. DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu);
  2405. DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried);
  2406. mon_ops = dp_mon_ops_get(pdev->soc);
  2407. if (mon_ops && mon_ops->mon_tx_stats_update)
  2408. mon_ops->mon_tx_stats_update(mon_peer, ppdu);
  2409. dp_tx_rate_stats_update(peer, ppdu);
  2410. dp_peer_stats_notify(pdev, peer);
  2411. ratekbps = mon_peer->stats.tx.tx_rate;
  2412. DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps);
  2413. dp_send_stats_event(pdev, peer, ppdu->peer_id);
  2414. }
/*
 * dp_get_ppdu_info_user_index() - Find or allocate a per-user descriptor
 * for a PPDU when a new peer id arrives in the PPDU
 * @pdev: DP pdev handle
 * @peer_id: peer unique identifier
 * @ppdu_info: per-ppdu tlv structure
 *
 * Return: user index to be populated
 */
  2424. static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
  2425. uint16_t peer_id,
  2426. struct ppdu_info *ppdu_info)
  2427. {
  2428. uint8_t user_index = 0;
  2429. struct cdp_tx_completion_ppdu *ppdu_desc;
  2430. struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
  2431. ppdu_desc =
  2432. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  2433. while ((user_index + 1) <= ppdu_info->last_user) {
  2434. ppdu_user_desc = &ppdu_desc->user[user_index];
  2435. if (ppdu_user_desc->peer_id != peer_id) {
  2436. user_index++;
  2437. continue;
  2438. } else {
  2439. /* Max users possible is 8 so user array index should
  2440. * not exceed 7
  2441. */
  2442. qdf_assert_always(user_index <= (ppdu_desc->max_users - 1));
  2443. return user_index;
  2444. }
  2445. }
  2446. ppdu_info->last_user++;
  2447. /* Max users possible is 8 so last user should not exceed 8 */
  2448. qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
  2449. return ppdu_info->last_user - 1;
  2450. }
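/*
 * Illustrative sketch (disabled): dp_get_ppdu_info_user_index() above is
 * a linear scan over at most max_users entries that allocates the next
 * slot on a miss. Equivalent standalone logic over a plain peer_id array
 * (names hypothetical):
 */
#if 0
static uint8_t example_user_index(uint16_t *peer_ids, uint8_t *last_user,
				  uint8_t max_users, uint16_t peer_id)
{
	uint8_t i;

	for (i = 0; i < *last_user; i++)
		if (peer_ids[i] == peer_id)
			return i;		/* existing user entry */
	qdf_assert_always(*last_user < max_users);
	peer_ids[(*last_user)++] = peer_id;	/* allocate a new slot */
	return *last_user - 1;
}
#endif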
/*
 * dp_process_ppdu_stats_common_tlv() - Process htt_ppdu_stats_common_tlv
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
 * @ppdu_info: per-ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
                                 uint32_t *tag_buf,
                                 struct ppdu_info *ppdu_info)
{
    uint16_t frame_type;
    uint16_t frame_ctrl;
    uint16_t freq;
    struct dp_soc *soc = NULL;
    struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
    uint64_t ppdu_start_timestamp;
    uint32_t *start_tag_buf;

    start_tag_buf = tag_buf;
    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
    ppdu_info->sched_cmdid =
        HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
    ppdu_desc->num_users =
        HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);

    qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
    frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
    ppdu_desc->htt_frame_type = frame_type;

    frame_ctrl = ppdu_desc->frame_ctrl;

    ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;

    switch (frame_type) {
    case HTT_STATS_FTYPE_TIDQ_DATA_SU:
    case HTT_STATS_FTYPE_TIDQ_DATA_MU:
    case HTT_STATS_FTYPE_SGEN_QOS_NULL:
        /*
         * for management packets, the frame type comes as DATA_SU;
         * check frame_ctrl before setting frame_type
         */
        if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
            ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
        else
            ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
        break;
    case HTT_STATS_FTYPE_SGEN_MU_BAR:
    case HTT_STATS_FTYPE_SGEN_BAR:
        ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
        break;
    default:
        ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
        break;
    }

    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
    ppdu_desc->tx_duration = *tag_buf;

    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
    ppdu_desc->ppdu_start_timestamp = *tag_buf;

    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
    freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
    if (freq != ppdu_desc->channel) {
        soc = pdev->soc;
        ppdu_desc->channel = freq;
        pdev->operating_channel.freq = freq;
        if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
            pdev->operating_channel.num =
                soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
                                                     pdev->pdev_id,
                                                     freq);
        if (soc && soc->cdp_soc.ol_ops->freq_to_band)
            pdev->operating_channel.band =
                soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
                                                  pdev->pdev_id,
                                                  freq);
    }

    ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);

    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
    ppdu_desc->phy_ppdu_tx_time_us =
        HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
    ppdu_desc->beam_change =
        HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
    ppdu_desc->doppler =
        HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
    ppdu_desc->spatial_reuse =
        HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);

    dp_tx_capture_htt_frame_counter(pdev, frame_type);
    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
    ppdu_start_timestamp = *tag_buf;
    ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
                                         HTT_SHIFT_UPPER_TIMESTAMP) &
                                        HTT_MASK_UPPER_TIMESTAMP);

    ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
                                    ppdu_desc->tx_duration;
    /* Ack timestamp is same as end timestamp */
    ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

    ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
    ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
    ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
    tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
    ppdu_desc->bss_color =
        HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
}
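
/*
 * Worked example (added comment; it assumes HTT_SHIFT_UPPER_TIMESTAMP is 32
 * and HTT_MASK_UPPER_TIMESTAMP masks the upper 32 bits, which matches how
 * the two TLV words are combined above): with START_TSTMP_L32_US =
 * 0x11223344 and START_TSTMP_U32_US = 0x00000001, the assembled 64-bit
 * start timestamp is 0x0000000111223344, and ppdu_end_timestamp is that
 * value plus tx_duration.
 */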
/*
 * dp_process_ppdu_stats_user_common_tlv() - Process ppdu_stats_user_common
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_common_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    uint16_t peer_id;
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    uint8_t curr_user_index = 0;
    struct dp_peer *peer;
    struct dp_vdev *vdev;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;
    peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index =
        dp_get_ppdu_info_user_index(pdev,
                                    peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

    ppdu_desc->vdev_id =
        HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);

    ppdu_user_desc->peer_id = peer_id;

    tag_buf++;

    if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
        ppdu_user_desc->delayed_ba = 1;
        ppdu_desc->delayed_ba = 1;
    }

    if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
        ppdu_user_desc->is_mcast = true;
        ppdu_user_desc->mpdu_tried_mcast =
            HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
        ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
    } else {
        ppdu_user_desc->mpdu_tried_ucast =
            HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
    }

    ppdu_user_desc->is_seq_num_valid =
        HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
    tag_buf++;

    ppdu_user_desc->qos_ctrl =
        HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
    ppdu_user_desc->frame_ctrl =
        HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
    ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

    if (ppdu_user_desc->delayed_ba)
        ppdu_user_desc->mpdu_success = 0;

    tag_buf += 3;

    if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
        ppdu_user_desc->ppdu_cookie =
            HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
        ppdu_user_desc->is_ppdu_cookie_valid = 1;
    }
    /* returning early would leave other fields unpopulated */
    if (peer_id == DP_SCAN_PEER_ID) {
        vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
                                     DP_MOD_ID_TX_PPDU_STATS);
        if (!vdev)
            return;
        qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
                     QDF_MAC_ADDR_SIZE);
        dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
    } else {
        peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
                                     DP_MOD_ID_TX_PPDU_STATS);
        if (!peer) {
            /*
             * fw sends a peer_id that is about to be removed but
             * was already removed on the host.
             * e.g. on disassoc, fw sends ppdu stats with a peer
             * id equal to the previously associated peer's
             * peer_id, but that peer is already gone
             */
            vdev = dp_vdev_get_ref_by_id(pdev->soc,
                                         ppdu_desc->vdev_id,
                                         DP_MOD_ID_TX_PPDU_STATS);
            if (!vdev)
                return;
            qdf_mem_copy(ppdu_user_desc->mac_addr,
                         vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
            dp_vdev_unref_delete(pdev->soc, vdev,
                                 DP_MOD_ID_TX_PPDU_STATS);
            return;
        }
        qdf_mem_copy(ppdu_user_desc->mac_addr,
                     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
        dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
    }
}
/**
 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
 * @pdev: DP pdev handle
 * @tag_buf: T2H message buffer carrying the user rate TLV
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
                                    uint32_t *tag_buf,
                                    struct ppdu_info *ppdu_info)
{
    uint16_t peer_id;
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    uint8_t curr_user_index = 0;
    struct dp_vdev *vdev;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
    uint8_t bw;

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;
    peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index =
        dp_get_ppdu_info_user_index(pdev,
                                    peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

    if (peer_id == DP_SCAN_PEER_ID) {
        vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
                                     DP_MOD_ID_TX_PPDU_STATS);
        if (!vdev)
            return;
        dp_vdev_unref_delete(pdev->soc, vdev,
                             DP_MOD_ID_TX_PPDU_STATS);
    }
    ppdu_user_desc->peer_id = peer_id;

    ppdu_user_desc->tid =
        HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

    tag_buf += 1;

    ppdu_user_desc->user_pos =
        HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
    ppdu_user_desc->mu_group_id =
        HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);

    tag_buf += 1;

    ppdu_user_desc->ru_start =
        HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
    ppdu_user_desc->ru_tones =
        (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
         HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
    ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;

    tag_buf += 2;

    ppdu_user_desc->ppdu_type =
        HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

    tag_buf++;
    ppdu_user_desc->tx_rate = *tag_buf;

    ppdu_user_desc->ltf_size =
        HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
    ppdu_user_desc->stbc =
        HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
    ppdu_user_desc->he_re =
        HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
    ppdu_user_desc->txbf =
        HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
    bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);
    /* Align bw value as per host data structures */
    if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ)
        ppdu_user_desc->bw = bw - 3;
    else
        ppdu_user_desc->bw = bw - 2;
    ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
    ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
    ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
    ppdu_user_desc->preamble =
        HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
    ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
    ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
    ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
}
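
/*
 * Added note on the bw adjustment above (an assumption about the HTT enum
 * layout, not stated in this file): the firmware bandwidth encoding appears
 * to start a couple of values above the host encoding, so the common
 * 20/40/80/160 MHz values are shifted down by 2, while the 320 MHz value is
 * shifted down by 3 to stay contiguous in the host enum. ru_tones is
 * likewise derived as (ru_end - ru_start) + 1, e.g. a RU spanning indices
 * 0..105 yields 106 tones.
 */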
/*
 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv() - Process
 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
        (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;

    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    uint8_t curr_user_index = 0;
    uint16_t peer_id;
    uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;

    peer_id =
        HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
    ppdu_user_desc->peer_id = peer_id;

    ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
    qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
                 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);

    dp_process_ppdu_stats_update_failed_bitmap(pdev,
                                               (void *)ppdu_user_desc,
                                               ppdu_info->ppdu_id,
                                               size);
}
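
/*
 * Added note (derived from the copy above, with one assumption): enq_bitmap
 * records, relative to start_seq, which MPDU sequence numbers were enqueued
 * for this user; a 64-bit map occupies CDP_BA_64_BIT_MAP_SIZE_DWORDS (2)
 * 32-bit words. dp_process_ppdu_stats_update_failed_bitmap() presumably
 * compares this enqueue bitmap against the BA bitmap received later to mark
 * the MPDUs that were never acknowledged.
 */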
/*
 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv() - Process
 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
        (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;

    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    uint8_t curr_user_index = 0;
    uint16_t peer_id;
    uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;

    peer_id =
        HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
    ppdu_user_desc->peer_id = peer_id;

    ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
    qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
                 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);

    dp_process_ppdu_stats_update_failed_bitmap(pdev,
                                               (void *)ppdu_user_desc,
                                               ppdu_info->ppdu_id,
                                               size);
}
/*
 * dp_process_ppdu_stats_user_cmpltn_common_tlv() - Process
 * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    uint16_t peer_id;
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    uint8_t curr_user_index = 0;
    uint8_t bw_iter;
    htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
        (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;
    peer_id =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
    ppdu_user_desc->peer_id = peer_id;

    ppdu_user_desc->completion_status =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
                *tag_buf);

    ppdu_user_desc->tid =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);

    tag_buf++;
    if (qdf_likely(ppdu_user_desc->completion_status ==
                   HTT_PPDU_STATS_USER_STATUS_OK)) {
        ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
        ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
        ppdu_user_desc->ack_rssi_valid = 1;
    } else {
        ppdu_user_desc->ack_rssi_valid = 0;
    }

    tag_buf++;

    ppdu_user_desc->mpdu_success =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
    ppdu_user_desc->mpdu_failed =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
        ppdu_user_desc->mpdu_success;

    tag_buf++;

    ppdu_user_desc->long_retries =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
    ppdu_user_desc->short_retries =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
    ppdu_user_desc->retry_mpdus =
        ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

    ppdu_user_desc->is_ampdu =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
    ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

    ppdu_desc->resp_type =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
    ppdu_desc->mprot_type =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
    ppdu_desc->rts_success =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
    ppdu_desc->rts_failure =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);

    ppdu_user_desc->pream_punct =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);

    ppdu_info->compltn_common_tlv++;
    /*
     * An MU BAR may send a request to n users, but we may receive acks
     * from only m of them. To count how many users responded, keep a
     * separate per-PPDU counter, bar_num_users, which is incremented for
     * every htt_ppdu_stats_user_cmpltn_common_tlv received
     */
    ppdu_desc->bar_num_users++;

    tag_buf++;
    for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
        ppdu_user_desc->rssi_chain[bw_iter] =
            HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
        tag_buf++;
    }

    ppdu_user_desc->sa_tx_antenna =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);

    tag_buf++;
    ppdu_user_desc->sa_is_training =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
    if (ppdu_user_desc->sa_is_training) {
        ppdu_user_desc->sa_goodput =
            HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
    }

    tag_buf++;
    for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
        ppdu_user_desc->sa_max_rates[bw_iter] =
            HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
    }

    tag_buf += CDP_NUM_SA_BW;
    ppdu_user_desc->current_rate_per =
        HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
}
/*
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv() - Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
        (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    struct cdp_tx_completion_ppdu *ppdu_desc;
    uint8_t curr_user_index = 0;
    uint16_t peer_id;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;

    peer_id =
        HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
    ppdu_user_desc->peer_id = peer_id;

    ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
    qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
                 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
    ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
}
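
/*
 * Added note: ba_size is expressed in sequence numbers, so a 64-bit BA
 * bitmap (CDP_BA_64_BIT_MAP_SIZE_DWORDS = 2 dwords of 32 bits) covers 64
 * MPDU sequence numbers starting at ba_seq_no; the 256-bit variant below
 * covers 256.
 */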
/*
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv() - Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
        (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    struct cdp_tx_completion_ppdu *ppdu_desc;
    uint8_t curr_user_index = 0;
    uint16_t peer_id;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;

    peer_id =
        HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
    ppdu_user_desc->peer_id = peer_id;

    ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
    qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
                 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
    ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
}
/*
 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv() - Process
 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    uint16_t peer_id;
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    uint8_t curr_user_index = 0;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf += 2;
    peer_id =
        HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

    curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
    if (!ppdu_user_desc->ack_ba_tlv) {
        ppdu_user_desc->ack_ba_tlv = 1;
    } else {
        pdev->stats.ack_ba_comes_twice++;
        return;
    }
    ppdu_user_desc->peer_id = peer_id;

    tag_buf++;
    /* Do not update ppdu_desc->tid from this TLV */
    ppdu_user_desc->num_mpdu =
        HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

    ppdu_user_desc->num_msdu =
        HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

    ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

    tag_buf++;
    ppdu_user_desc->start_seq =
        HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
            *tag_buf);

    tag_buf++;
    ppdu_user_desc->success_bytes = *tag_buf;

    /* increment the ack ba tlv counter on a successful mpdu */
    if (ppdu_user_desc->num_mpdu)
        ppdu_info->ack_ba_tlv++;

    if (ppdu_user_desc->ba_size == 0) {
        ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
        ppdu_user_desc->ba_bitmap[0] = 1;
        ppdu_user_desc->ba_size = 1;
    }
}
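
/*
 * Added note (derived from the fallback above): if no BA bitmap TLV was
 * received for this user, ba_size is still 0 at this point. A plain ACK is
 * then represented as a one-entry bitmap anchored at start_seq, i.e.
 * ba_bitmap[0] = 1 and ba_size = 1, so later bitmap-based accounting works
 * uniformly for ACK and Block-Ack responses.
 */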
/*
 * dp_process_ppdu_stats_user_common_array_tlv() - Process
 * htt_ppdu_stats_user_common_array_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_common_array_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_common_array_tlv(
        struct dp_pdev *pdev, uint32_t *tag_buf,
        struct ppdu_info *ppdu_info)
{
    uint32_t peer_id;
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
    uint8_t curr_user_index = 0;
    struct htt_tx_ppdu_stats_info *dp_stats_buf;
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

    ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

    tag_buf++;
    dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
    tag_buf += 3;
    peer_id =
        HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);

    if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "Invalid peer");
        return;
    }

    curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
    ppdu_user_desc = &ppdu_desc->user[curr_user_index];
    ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

    ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
    ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;

    tag_buf++;

    ppdu_user_desc->success_msdus =
        HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
    /*
     * The retry MSDU count belongs in retry_msdus; storing it in
     * retry_bytes would overwrite the byte count fetched above.
     */
    ppdu_user_desc->retry_msdus =
        HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
    tag_buf++;
    ppdu_user_desc->failed_msdus =
        HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
}
/*
 * dp_process_ppdu_stats_user_compltn_flush_tlv() - Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
                                             uint32_t *tag_buf,
                                             struct ppdu_info *ppdu_info)
{
    struct cdp_tx_completion_ppdu *ppdu_desc;
    uint32_t peer_id;
    uint8_t tid;
    struct dp_peer *peer;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
    struct dp_mon_peer *mon_peer = NULL;

    ppdu_desc = (struct cdp_tx_completion_ppdu *)
        qdf_nbuf_data(ppdu_info->nbuf);
    ppdu_desc->is_flush = 1;

    tag_buf++;
    ppdu_desc->drop_reason = *tag_buf;

    tag_buf++;
    ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
    ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
    ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);

    tag_buf++;
    peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
    tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);

    ppdu_desc->num_users = 1;
    ppdu_desc->user[0].peer_id = peer_id;
    ppdu_desc->user[0].tid = tid;

    ppdu_desc->queue_type =
        HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);

    peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
                                 DP_MOD_ID_TX_PPDU_STATS);
    if (!peer)
        goto add_ppdu_to_sched_list;

    if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
        mon_peer = peer->monitor_peer;
        DP_STATS_INC(mon_peer,
                     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
                     ppdu_desc->num_msdu);
    }

    dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);

add_ppdu_to_sched_list:
    ppdu_info->done = 1;
    TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
    mon_pdev->list_depth--;
    TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
                      ppdu_info_list_elem);
    mon_pdev->sched_comp_list_depth++;
}
/**
 * dp_process_ppdu_stats_sch_cmd_status_tlv() - Process schedule command status tlv
 * Here we are not going to process the buffer.
 * @pdev: DP PDEV handle
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
                                         struct ppdu_info *ppdu_info)
{
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct dp_peer *peer;
    uint8_t num_users;
    uint8_t i;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    ppdu_desc = (struct cdp_tx_completion_ppdu *)
        qdf_nbuf_data(ppdu_info->nbuf);

    num_users = ppdu_desc->bar_num_users;

    for (i = 0; i < num_users; i++) {
        if (ppdu_desc->user[i].user_pos == 0) {
            if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
                /* update phy mode for bar frame */
                ppdu_desc->phy_mode =
                    ppdu_desc->user[i].preamble;
                ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
                break;
            }
            if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
                ppdu_desc->frame_ctrl =
                    ppdu_desc->user[i].frame_ctrl;
                break;
            }
        }
    }

    if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
        ppdu_desc->delayed_ba) {
        qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

        for (i = 0; i < ppdu_desc->num_users; i++) {
            struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
            uint64_t start_tsf;
            uint64_t end_tsf;
            uint32_t ppdu_id;
            struct dp_mon_peer *mon_peer;

            ppdu_id = ppdu_desc->ppdu_id;
            peer = dp_peer_get_ref_by_id
                (pdev->soc, ppdu_desc->user[i].peer_id,
                 DP_MOD_ID_TX_PPDU_STATS);
            /*
             * This check is to make sure peer is not deleted
             * after processing the TLVs.
             */
            if (!peer)
                continue;

            mon_peer = peer->monitor_peer;
            delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
            start_tsf = ppdu_desc->ppdu_start_timestamp;
            end_tsf = ppdu_desc->ppdu_end_timestamp;
            /*
             * save delayed ba user info
             */
            if (ppdu_desc->user[i].delayed_ba) {
                dp_peer_copy_delay_stats(peer,
                                         &ppdu_desc->user[i],
                                         ppdu_id);
                mon_peer->last_delayed_ba_ppduid = ppdu_id;
                delay_ppdu->ppdu_start_timestamp = start_tsf;
                delay_ppdu->ppdu_end_timestamp = end_tsf;
            }
            ppdu_desc->user[i].peer_last_delayed_ba =
                mon_peer->last_delayed_ba;

            dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);

            if (ppdu_desc->user[i].delayed_ba &&
                !ppdu_desc->user[i].debug_copied) {
                QDF_TRACE(QDF_MODULE_ID_TXRX,
                          QDF_TRACE_LEVEL_INFO_MED,
                          "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
                          __func__, __LINE__,
                          ppdu_desc->ppdu_id,
                          ppdu_desc->bar_ppdu_id,
                          ppdu_desc->num_users,
                          i,
                          ppdu_desc->htt_frame_type);
            }
        }
    }
    /*
     * when the frame type is BAR and STATS_COMMON_TLV is set,
     * copy the stored peer delayed info to the BAR status
     */
    if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
        for (i = 0; i < ppdu_desc->bar_num_users; i++) {
            struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
            uint64_t start_tsf;
            uint64_t end_tsf;
            struct dp_mon_peer *mon_peer;

            peer = dp_peer_get_ref_by_id
                (pdev->soc,
                 ppdu_desc->user[i].peer_id,
                 DP_MOD_ID_TX_PPDU_STATS);
            /*
             * This check is to make sure peer is not deleted
             * after processing the TLVs.
             */
            if (!peer)
                continue;

            mon_peer = peer->monitor_peer;
            if (ppdu_desc->user[i].completion_status !=
                HTT_PPDU_STATS_USER_STATUS_OK) {
                dp_peer_unref_delete(peer,
                                     DP_MOD_ID_TX_PPDU_STATS);
                continue;
            }

            delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
            start_tsf = delay_ppdu->ppdu_start_timestamp;
            end_tsf = delay_ppdu->ppdu_end_timestamp;

            if (mon_peer->last_delayed_ba) {
                dp_peer_copy_stats_to_bar(peer,
                                          &ppdu_desc->user[i]);
                ppdu_desc->ppdu_id =
                    mon_peer->last_delayed_ba_ppduid;
                ppdu_desc->ppdu_start_timestamp = start_tsf;
                ppdu_desc->ppdu_end_timestamp = end_tsf;
            }
            ppdu_desc->user[i].peer_last_delayed_ba =
                mon_peer->last_delayed_ba;

            dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
        }
    }
    TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
    mon_pdev->list_depth--;
    TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
                      ppdu_info_list_elem);
    mon_pdev->sched_comp_list_depth++;
}
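
/*
 * Added summary of the delayed-BA flow handled above (an interpretation of
 * the existing logic, not new behavior): when a data PPDU is sent with
 * delayed block-ack, its per-user stats cannot be finalized yet, so they are
 * stashed in mon_peer->delayed_ba_ppdu_stats along with the PPDU timestamps.
 * When the corresponding BAR PPDU completes later, the stashed stats and
 * timestamps are copied back onto the BAR's user entry so the delivered
 * descriptor reflects the original data PPDU.
 */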
/**
 * dp_validate_fix_ppdu_tlv() - Function to validate the length of a PPDU TLV
 *
 * If the TLV length sent as part of the PPDU TLV is less than the expected
 * size, i.e. the size of the corresponding data structure, pad the remaining
 * bytes with zeros and continue processing the TLVs
 *
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_expected_size: Expected size of Tag
 * @tlv_len: TLV length received from FW
 *
 * Return: Pointer to updated TLV
 */
static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
                                                 uint32_t *tag_buf,
                                                 uint16_t tlv_expected_size,
                                                 uint16_t tlv_len)
{
    uint32_t *tlv_desc = tag_buf;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    qdf_assert_always(tlv_len != 0);

    if (tlv_len < tlv_expected_size) {
        qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
        qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
        tlv_desc = mon_pdev->ppdu_tlv_buf;
    }

    return tlv_desc;
}
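
/*
 * Example (added comment; the sizes are illustrative only): if an older
 * firmware sends a 24-byte htt_ppdu_stats_common_tlv while the host
 * structure is 32 bytes, the 24 received bytes are copied into
 * mon_pdev->ppdu_tlv_buf, the trailing 8 bytes stay zeroed, and parsing
 * proceeds on the padded copy instead of reading past the firmware buffer.
 */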
/**
 * dp_process_ppdu_tag() - Function to process the PPDU TLVs
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev,
                                uint32_t *tag_buf,
                                uint32_t tlv_len,
                                struct ppdu_info *ppdu_info)
{
    uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
    uint16_t tlv_expected_size;
    uint32_t *tlv_desc;

    switch (tlv_type) {
    case HTT_PPDU_STATS_COMMON_TLV:
        tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_COMMON_TLV:
        tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
                                              ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_RATE_TLV:
        tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
                                            ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
        tlv_expected_size =
            sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
                pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
        tlv_expected_size =
            sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
                pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
        tlv_expected_size =
            sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_cmpltn_common_tlv(
                pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
        tlv_expected_size =
            sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
                pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
        tlv_expected_size =
            sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
                pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
        tlv_expected_size =
            sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
                pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
        tlv_expected_size =
            sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_common_array_tlv(
                pdev, tlv_desc, ppdu_info);
        break;
    case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
        tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
        tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
                                            tlv_expected_size, tlv_len);
        dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
                                                     ppdu_info);
        break;
    case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
        dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
        break;
    default:
        break;
    }
}
#ifdef WLAN_ATF_ENABLE
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
                                     struct cdp_tx_completion_ppdu *ppdu_desc,
                                     struct cdp_tx_completion_ppdu_user *user)
{
    uint32_t nss_ru_width_sum = 0;
    struct dp_mon_pdev *mon_pdev = NULL;

    if (!pdev || !ppdu_desc || !user)
        return;

    mon_pdev = pdev->monitor_pdev;
    if (!mon_pdev || !mon_pdev->dp_atf_stats_enable)
        return;

    if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
        return;

    nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
    if (!nss_ru_width_sum)
        nss_ru_width_sum = 1;
    /*
     * For an SU PPDU the phy Tx time is the same for the single user.
     * For MU-MIMO/OFDMA the phy Tx time is apportioned per user:
     * user phy tx time =
     *     Entire PPDU duration * MU Ratio * OFDMA Ratio
     * MU Ratio = usr_nss / Sum_of_nss_of_all_users
     * OFDMA Ratio = usr_ru_width / Sum_of_ru_width_of_all_users
     * usr_ru_width = ru_end - ru_start + 1
     */
    if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
        user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
    } else {
        user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
                user->nss * user->ru_tones) / nss_ru_width_sum;
    }
}
#else
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
                                     struct cdp_tx_completion_ppdu *ppdu_desc,
                                     struct cdp_tx_completion_ppdu_user *user)
{
}
#endif
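
/*
 * Worked example for the apportioning above (added comment; the numbers are
 * illustrative): take a 1000 us MU PPDU with two users, each with nss = 2
 * and ru_tones = 106. Then usr_nss_sum = 4, usr_ru_tones_sum = 212 and
 * nss_ru_width_sum = 848, so each user gets
 * 1000 * 2 * 106 / 848 = 250 us, i.e. the product of the MU ratio (2/4)
 * and the OFDMA ratio (106/212) applied to the PPDU duration.
 */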
/**
 * dp_ppdu_desc_user_stats_update() - Function to update TX user stats
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Return: void
 */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
                               struct ppdu_info *ppdu_info)
{
    struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
    struct dp_peer *peer = NULL;
    uint32_t tlv_bitmap_expected;
    uint32_t tlv_bitmap_default;
    uint16_t i;
    uint32_t num_users;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    ppdu_desc = (struct cdp_tx_completion_ppdu *)
        qdf_nbuf_data(ppdu_info->nbuf);

    if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
        ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

    tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
    if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
        mon_pdev->tx_capture_enabled) {
        if (ppdu_info->is_ampdu)
            tlv_bitmap_expected =
                dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
                    ppdu_info->tlv_bitmap);
    }

    tlv_bitmap_default = tlv_bitmap_expected;

    if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
        num_users = ppdu_desc->bar_num_users;
        ppdu_desc->num_users = ppdu_desc->bar_num_users;
    } else {
        num_users = ppdu_desc->num_users;
    }
    qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

    for (i = 0; i < num_users; i++) {
        ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
        ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

        peer = dp_peer_get_ref_by_id(pdev->soc,
                                     ppdu_desc->user[i].peer_id,
                                     DP_MOD_ID_TX_PPDU_STATS);
        /*
         * This check is to make sure peer is not deleted
         * after processing the TLVs.
         */
        if (!peer)
            continue;

        ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
        /*
         * Different frame types (DATA, BAR or CTRL) have different
         * expected tlv bitmaps. Apart from the ACK_BA_STATUS TLV, we
         * receive the other tlvs in order from fw. Since the
         * ACK_BA_STATUS TLV comes from hardware it is asynchronous,
         * so we need to depend on some tlv to confirm that all tlvs
         * have been received for a ppdu.
         * So we depend on both SCHED_CMD_STATUS_TLV and
         * ACK_BA_STATUS_TLV; for a failed packet we won't get the
         * ACK_BA_STATUS_TLV.
         */
        if (!(ppdu_info->tlv_bitmap &
              (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
            (!(ppdu_info->tlv_bitmap &
               (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
             (ppdu_desc->user[i].completion_status ==
              HTT_PPDU_STATS_USER_STATUS_OK))) {
            dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
            continue;
        }

        /*
         * Update tx stats for data frames having Qos as well as
         * non-Qos data tid
         */
        if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
             (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
             (ppdu_desc->htt_frame_type ==
              HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
             ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
              (ppdu_desc->num_mpdu > 1))) &&
            (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
            dp_tx_stats_update(pdev, peer,
                               &ppdu_desc->user[i],
                               ppdu_desc->ack_rssi);
        }

        dp_ppdu_desc_user_phy_tx_time_update(pdev, ppdu_desc,
                                             &ppdu_desc->user[i]);
        dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
        tlv_bitmap_expected = tlv_bitmap_default;
    }
}
#if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(QCA_MONITOR_2_0_SUPPORT)
/*
 * dp_tx_ppdu_desc_notify() - Notify upper layer about PPDU via WDI
 *
 * @pdev: Datapath pdev handle
 * @nbuf: Buffer to be delivered to upper layer
 *
 * Return: void
 */
static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
    struct dp_soc *soc = pdev->soc;
    struct dp_mon_ops *mon_ops = NULL;

    mon_ops = dp_mon_ops_get(soc);
    if (mon_ops && mon_ops->mon_ppdu_desc_notify)
        mon_ops->mon_ppdu_desc_notify(pdev, nbuf);
    else
        qdf_nbuf_free(nbuf);
}
void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
                          struct ppdu_info *ppdu_info)
{
    struct ppdu_info *s_ppdu_info = NULL;
    struct ppdu_info *ppdu_info_next = NULL;
    struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
    qdf_nbuf_t nbuf;
    uint32_t time_delta = 0;
    bool starved = 0;
    bool matched = 0;
    bool recv_ack_ba_done = 0;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    if (ppdu_info->tlv_bitmap &
        (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
        ppdu_info->done)
        recv_ack_ba_done = 1;

    mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;

    s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);

    TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
                       ppdu_info_list_elem, ppdu_info_next) {
        if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
            time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
                    ppdu_info->tsf_l32;
        else
            time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;

        if (!s_ppdu_info->done && !recv_ack_ba_done) {
            if (time_delta < MAX_SCHED_STARVE) {
                dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
                            pdev->pdev_id,
                            s_ppdu_info->ppdu_id,
                            s_ppdu_info->sched_cmdid,
                            s_ppdu_info->tlv_bitmap,
                            s_ppdu_info->tsf_l32,
                            s_ppdu_info->done);
                break;
            }
            starved = 1;
        }

        mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
        TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
                     ppdu_info_list_elem);
        mon_pdev->sched_comp_list_depth--;

        nbuf = s_ppdu_info->nbuf;
        qdf_assert_always(nbuf);
        ppdu_desc = (struct cdp_tx_completion_ppdu *)
            qdf_nbuf_data(nbuf);
        ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;

        if (starved) {
            dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
                        ppdu_desc->frame_ctrl,
                        ppdu_desc->htt_frame_type,
                        ppdu_desc->tlv_bitmap,
                        ppdu_desc->user[0].completion_status);
            starved = 0;
        }

        if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
            ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
            matched = 1;

        dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);

        qdf_mem_free(s_ppdu_info);

        dp_tx_ppdu_desc_notify(pdev, nbuf);

        if (matched)
            break;
    }
}
#endif
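
/*
 * Added note on the delivery loop above (an interpretation of the existing
 * logic): completed PPDUs are drained from sched_comp_ppdu_list in order up
 * to and including the entry matching the PPDU being delivered. An entry
 * that is not done is normally left queued, unless it has been waiting
 * longer than MAX_SCHED_STARVE (measured via the 32-bit TSF delta, with
 * wrap-around handled through MAX_TSF_32), in which case it is flushed as
 * starved so the list cannot stall behind a PPDU whose TLVs never complete.
 */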
/*
 * dp_tx_ppdu_desc_deliver() - Deliver PPDU desc to upper layer
 *
 * @pdev: Datapath pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Return: void
 */
static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev,
                                    struct ppdu_info *ppdu_info)
{
    struct dp_soc *soc = pdev->soc;
    struct dp_mon_ops *mon_ops = NULL;

    mon_ops = dp_mon_ops_get(soc);
    if (mon_ops && mon_ops->mon_ppdu_desc_deliver) {
        mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info);
    } else {
        qdf_nbuf_free(ppdu_info->nbuf);
        ppdu_info->nbuf = NULL;
        qdf_mem_free(ppdu_info);
    }
}
/**
 * dp_get_ppdu_desc() - Function to allocate a new PPDU status
 * descriptor for a new ppdu id
 * @pdev: DP pdev handle
 * @ppdu_id: PPDU unique identifier
 * @tlv_type: TLV type received
 * @tsf_l32: timestamp received along with ppdu stats indication header
 * @max_users: Maximum user for that particular ppdu
 *
 * Return: ppdu_info per ppdu tlv structure
 */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
                                   uint8_t tlv_type, uint32_t tsf_l32,
                                   uint8_t max_users)
{
    struct ppdu_info *ppdu_info = NULL;
    struct ppdu_info *s_ppdu_info = NULL;
    struct ppdu_info *ppdu_info_next = NULL;
    struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
    uint32_t size = 0;
    struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
    struct cdp_tx_completion_ppdu_user *tmp_user;
    uint32_t time_delta;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    /*
     * Find whether a node for this ppdu_id already exists
     */
    TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
                       ppdu_info_list_elem, ppdu_info_next) {
        if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
            if (ppdu_info->tsf_l32 > tsf_l32)
                time_delta = (MAX_TSF_32 -
                              ppdu_info->tsf_l32) + tsf_l32;
            else
                time_delta = tsf_l32 - ppdu_info->tsf_l32;

            if (time_delta > WRAP_DROP_TSF_DELTA) {
                TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
                             ppdu_info, ppdu_info_list_elem);
                mon_pdev->list_depth--;
                pdev->stats.ppdu_wrap_drop++;
                tmp_ppdu_desc =
                    (struct cdp_tx_completion_ppdu *)
                        qdf_nbuf_data(ppdu_info->nbuf);
                tmp_user = &tmp_ppdu_desc->user[0];
                dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
                                     ppdu_info->ppdu_id,
                                     ppdu_info->tsf_l32,
                                     ppdu_info->tlv_bitmap,
                                     tmp_user->completion_status,
                                     ppdu_info->compltn_common_tlv,
                                     ppdu_info->ack_ba_tlv,
                                     ppdu_id, tsf_l32,
                                     tlv_type);
                qdf_nbuf_free(ppdu_info->nbuf);
                ppdu_info->nbuf = NULL;
                qdf_mem_free(ppdu_info);
            } else {
                break;
            }
        }
    }
    /*
     * check if it is an ack ba tlv; if it is not in the ppdu info
     * list, then check the sched completion ppdu list
     */
    if (!ppdu_info &&
        tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
        TAILQ_FOREACH(s_ppdu_info,
                      &mon_pdev->sched_comp_ppdu_list,
                      ppdu_info_list_elem) {
            if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
                if (s_ppdu_info->tsf_l32 > tsf_l32)
                    time_delta = (MAX_TSF_32 -
                                  s_ppdu_info->tsf_l32) +
                                    tsf_l32;
                else
                    time_delta = tsf_l32 -
                        s_ppdu_info->tsf_l32;
                if (time_delta < WRAP_DROP_TSF_DELTA) {
                    ppdu_info = s_ppdu_info;
                    break;
                }
            } else {
                /*
                 * ACK BA STATUS TLVs arrive in sequential
                 * order: if we receive an ack ba status tlv
                 * for a second ppdu while the first ppdu is
                 * still waiting for its ACK BA STATUS TLV,
                 * then per fw guidance that tlv will not
                 * arrive later, so mark the ppdu info done.
                 */
                if (s_ppdu_info)
                    s_ppdu_info->done = 1;
            }
        }
    }
    if (ppdu_info) {
        if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
            /*
             * if we get a tlv_type that has already been
             * processed for this ppdu, that means we got a new
             * ppdu with the same ppdu id; hence flush the older
             * ppdu. For MU-MIMO and OFDMA a PPDU has multiple
             * users with the same tlv types, and the tlv bitmap
             * is used to distinguish SU from MU_MIMO/OFDMA
             */
            if (!(ppdu_info->tlv_bitmap &
                  (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
                return ppdu_info;

            ppdu_desc = (struct cdp_tx_completion_ppdu *)
                qdf_nbuf_data(ppdu_info->nbuf);

            /*
             * apart from the ACK BA STATUS TLV, the rest arrive
             * in order, so if the tlv type is not ACK BA STATUS
             * TLV we can deliver ppdu_info
             */
            if ((tlv_type ==
                 HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
                (ppdu_desc->htt_frame_type ==
                 HTT_STATS_FTYPE_SGEN_MU_BAR))
                return ppdu_info;

            dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
        } else {
            return ppdu_info;
        }
    }
    /*
     * Flush the head ppdu descriptor if the ppdu desc list reaches the
     * max threshold
     */
    if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
        ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
        TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
                     ppdu_info, ppdu_info_list_elem);
        mon_pdev->list_depth--;
        pdev->stats.ppdu_drop++;
        qdf_nbuf_free(ppdu_info->nbuf);
        ppdu_info->nbuf = NULL;
        qdf_mem_free(ppdu_info);
    }

    size = sizeof(struct cdp_tx_completion_ppdu) +
        (max_users * sizeof(struct cdp_tx_completion_ppdu_user));

    /*
     * Allocate new ppdu_info node
     */
    ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
    if (!ppdu_info)
        return NULL;

    ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
                                     0, 4, TRUE);
    if (!ppdu_info->nbuf) {
        qdf_mem_free(ppdu_info);
        return NULL;
    }

    ppdu_info->ppdu_desc =
        (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
    qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);

    if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) {
        dp_mon_err("No tailroom for HTT PPDU");
        qdf_nbuf_free(ppdu_info->nbuf);
        ppdu_info->nbuf = NULL;
        ppdu_info->last_user = 0;
        qdf_mem_free(ppdu_info);
        return NULL;
    }

    ppdu_info->ppdu_desc->max_users = max_users;
    ppdu_info->tsf_l32 = tsf_l32;
    /*
     * No lock is needed because all PPDU TLVs are processed in the
     * same context and this list is updated in the same context
     */
    TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
                      ppdu_info_list_elem);
    mon_pdev->list_depth++;

    return ppdu_info;
}
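
/*
 * Added example of the TSF wrap handling above (illustrative values): with
 * a stored tsf_l32 of 0xFFFFFF00 and a new tsf_l32 of 0x00000100, the
 * elapsed time is computed as (MAX_TSF_32 - 0xFFFFFF00) + 0x100 rather than
 * a negative difference, so a stale descriptor that straddles the 32-bit
 * TSF rollover is still aged out correctly against WRAP_DROP_TSF_DELTA.
 */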
/**
 * dp_htt_process_tlv() - Function to process each PPDU TLV
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Return: ppdu_info per ppdu tlv structure
 */
  3835. static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
  3836. qdf_nbuf_t htt_t2h_msg)
  3837. {
  3838. uint32_t length;
  3839. uint32_t ppdu_id;
  3840. uint8_t tlv_type;
  3841. uint32_t tlv_length, tlv_bitmap_expected;
  3842. uint8_t *tlv_buf;
  3843. struct ppdu_info *ppdu_info = NULL;
  3844. struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
  3845. uint8_t max_users = CDP_MU_MAX_USERS;
  3846. uint32_t tsf_l32;
  3847. struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
  3848. uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
  3849. length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);
  3850. msg_word = msg_word + 1;
  3851. ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);
  3852. msg_word = msg_word + 1;
  3853. tsf_l32 = (uint32_t)(*msg_word);
  3854. msg_word = msg_word + 2;

	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;
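
		/*
		 * Each TLV opens with a single 32-bit header word carrying
		 * the tag and the payload length (exact bit positions come
		 * from the HTT_STATS_TLV_* macros). The length field counts
		 * the payload only, hence the HTT_TLV_HDR_LEN adjustment
		 * above before stepping to the next TLV.
		 */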

		/*
		 * Not allocating a separate ppdu descriptor for the MGMT
		 * payload TLV, as it is sent as a separate WDI indication
		 * and doesn't contain any ppdu information.
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
				(*(msg_word + 1));
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		/*
		 * Pick max_users from the USERS_INFO TLV when present;
		 * a COMPLTN_FLUSH TLV carries a single user, and every
		 * other TLV falls back to CDP_MU_MAX_USERS.
		 */
		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
			max_users =
				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
			max_users = 1;
		}

		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
					     tsf_l32, max_users);
		if (!ppdu_info)
			return NULL;

		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/*
		 * Increment the pdev level tlv count to monitor
		 * missing TLVs.
		 */
		mon_pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	if (!ppdu_info)
		return NULL;

	mon_pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
	    mon_pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	ppdu_desc = ppdu_info->ppdu_desc;
	if (!ppdu_desc)
		return NULL;

	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
	    HTT_PPDU_STATS_USER_STATUS_OK) {
		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
	}

	/*
	 * For frame types DATA and BAR, stats are updated per MSDU:
	 * the successful msdu and mpdu counts are populated from the ACK
	 * BA STATUS TLV, which can arrive out of order, while the
	 * successful mpdu count is also populated from the COMPLTN COMMON
	 * TLV, which arrives in order. For every ppdu_info we therefore
	 * store the successful mpdu count from both TLVs and compare them
	 * before delivering, to make sure the ACK BA STATUS TLV was
	 * received. Some self-generated frames never get an ACK BA STATUS
	 * TLV, so there is no need to wait for it in those cases.
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * Most of the time a BAR frame will carry a duplicate
		 * ACK BA STATUS TLV.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
			return NULL;

		/*
		 * For a data frame, the COMPLTN COMMON TLV count should
		 * match the ACK BA STATUS TLV count when the completion
		 * status is OK. Only the first user is checked because,
		 * for OFDMA, completion is seen at the next MU BAR frame,
		 * whereas for MIMO only the first user's completion is
		 * immediate.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
		    (ppdu_desc->user[0].completion_status == 0 &&
		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
			return NULL;
	}

	/*
	 * Once all the TLVs for a given PPDU have been processed, return
	 * the PPDU status to be delivered to the higher layer.
	 * tlv_bitmap_expected cannot be known for every frame type, but
	 * the SCHED CMD STATUS TLV is the last TLV sent by the FW for a
	 * ppdu; apart from the ACK BA TLV, the FW sends the other TLVs in
	 * sequential order, and the flush TLV comes separately.
	 */
	if ((ppdu_info->tlv_bitmap != 0 &&
	     (ppdu_info->tlv_bitmap &
	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
	    (ppdu_info->tlv_bitmap &
	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
		ppdu_info->done = 1;
		return ppdu_info;
	}

	return NULL;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_tx_ppdu_stats_feat_enable_check() - Check if feature(s) are enabled to
 *				consume stats received from FW via HTT
 * @pdev: Datapath pdev handle
 *
 * Return: true if at least one such feature is enabled
 */
static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check)
		return mon_ops->mon_ppdu_stats_feat_enable_check(pdev);
	else
		return false;
}
#endif

#if defined(WDI_EVENT_ENABLE)
#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
 * @soc: DP SOC handle
 * @pdev_id: pdev id
 * @htt_t2h_msg: HTT message nbuf
 *
 * Return: true if the caller should free @htt_t2h_msg
 */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev;
	struct ppdu_info *ppdu_info = NULL;
	bool free_buf = true;
	struct dp_mon_pdev *mon_pdev;

	if (pdev_id >= MAX_PDEV_CNT)
		return true;

	pdev = soc->pdev_list[pdev_id];
	if (!pdev)
		return true;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev)
		return true;

	if (!dp_tx_ppdu_stats_feat_enable_check(pdev))
		return free_buf;

	qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);

	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);

	if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
		    (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
		    QDF_STATUS_SUCCESS)
			free_buf = false;
	}

	if (ppdu_info)
		dp_tx_ppdu_desc_deliver(pdev, ppdu_info);

	mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
	mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
	mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;

	qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);

	return free_buf;
}
#else
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#endif

#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/**
 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 * @htt_t2h_msg: HTT msg nbuf
 *
 * Return: True if buffer should be freed by caller.
 */
bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
			  uint32_t *msg_word,
			  qdf_nbuf_t htt_t2h_msg)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;
	bool free_buf;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
					      htt_t2h_msg);

	return free_buf;
}
#endif
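
/*
 * Usage sketch (illustrative of the caller contract only): the HTT T2H
 * dispatch path is expected to free the message nbuf only when this
 * handler returns true, e.g.
 *
 *	free_buf = dp_ppdu_stats_ind_handler(soc, msg_word, htt_t2h_msg);
 *	if (free_buf)
 *		qdf_nbuf_free(htt_t2h_msg);
 */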

void
dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
	pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
}

bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
		return true;

	return false;
}

bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
		return true;

	return false;
}

bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
	    (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
		if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
		    (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
			return true;
		}
	}

	return false;
}
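
/*
 * Descriptive note: the fp_* and mo_* bitmaps appear to correspond to
 * the "filter pass" and "monitor other" filter categories used by the
 * monitor filter code, so each predicate above answers whether a given
 * traffic class is enabled in either category;
 * dp_pdev_get_filter_non_data() deliberately requires both the mgmt and
 * ctrl classes to be fully enabled.
 */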

QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
{
	int target_type;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct cdp_mon_ops *cdp_ops;

	cdp_ops = dp_mon_cdp_ops_get(soc);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
		/* do nothing */
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) {
			if (cdp_ops && cdp_ops->config_full_mon_mode)
				cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1);
		}
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9224:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		mon_soc->monitor_mode_v2 = 1;
		break;
	default:
		dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	dp_mon_info("hw_nac_monitor_support = %d",
		    mon_soc->hw_nac_monitor_support);

	return QDF_STATUS_SUCCESS;
}
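
/*
 * Summary of the per-target choices above (descriptive only): broadly,
 * the mobile targets (QCA6290 through KIWI) need no extra soc config;
 * the AP-class targets get the delayed-replenish minimum and, from
 * QCA8074V2 onwards, HW assisted NAC monitoring; QCN9224 additionally
 * selects the monitor 2.0 path via monitor_mode_v2.
 */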

/**
 * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration
 * @pdev: PDEV handle [Should be valid]
 *
 * Return: None
 */
static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	int target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_KIWI:
		mon_pdev->is_tlv_hdr_64_bit = true;
		break;
	default:
		mon_pdev->is_tlv_hdr_64_bit = false;
		break;
	}
}

QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops;
	qdf_size_t mon_pdev_context_size;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		goto fail0;
	}

	soc = pdev->soc;

	mon_pdev_context_size =
		soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV);
	mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE,
					mon_pdev_context_size);
	if (!mon_pdev) {
		dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
		goto fail0;
	}

	pdev->monitor_pdev = mon_pdev;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("%pK: Invalid monitor ops", pdev);
		goto fail1;
	}

	if (mon_ops->mon_pdev_alloc) {
		if (mon_ops->mon_pdev_alloc(pdev)) {
			dp_mon_err("%pK: MONITOR pdev alloc failed", pdev);
			goto fail1;
		}
	}

	if (mon_ops->mon_rings_alloc) {
		if (mon_ops->mon_rings_alloc(pdev)) {
			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
			goto fail2;
		}
	}

	/* Rx monitor mode specific init */
	if (mon_ops->rx_mon_desc_pool_alloc) {
		if (mon_ops->rx_mon_desc_pool_alloc(pdev)) {
			dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
			goto fail3;
		}
	}

	dp_mon_pdev_per_target_config(pdev);

	return QDF_STATUS_SUCCESS;

fail3:
	if (mon_ops->mon_rings_free)
		mon_ops->mon_rings_free(pdev);
fail2:
	if (mon_ops->mon_pdev_free)
		mon_ops->mon_pdev_free(pdev);
fail1:
	pdev->monitor_pdev = NULL;
	qdf_mem_free(mon_pdev);
fail0:
	return QDF_STATUS_E_NOMEM;
}
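
/*
 * Note on the unwind pattern above (descriptive only): each failN label
 * undoes exactly the steps that succeeded before the jump, in reverse
 * order, so a failure at any stage releases the rings, the arch-specific
 * pdev state and finally the mon_pdev context itself.
 */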

QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops = NULL;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev) {
		dp_mon_err("Monitor pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (mon_ops->rx_mon_desc_pool_free)
		mon_ops->rx_mon_desc_pool_free(pdev);
	if (mon_ops->mon_rings_free)
		mon_ops->mon_rings_free(pdev);
	if (mon_ops->mon_pdev_free)
		mon_ops->mon_pdev_free(pdev);

	qdf_mem_free(mon_pdev);
	pdev->monitor_pdev = NULL;
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops = NULL;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = pdev->soc;
	mon_pdev = pdev->monitor_pdev;

	mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer));
	if (!mon_pdev->invalid_mon_peer) {
		dp_mon_err("%pK: Memory allocation failed for invalid "
			   "monitor peer", pdev);
		return QDF_STATUS_E_NOMEM;
	}

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		goto fail0;
	}

	mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
	if (!mon_pdev->filter) {
		dp_mon_err("%pK: Memory allocation failed for monitor filter",
			   pdev);
		goto fail0;
	}

	if (mon_ops->tx_mon_filter_alloc) {
		if (mon_ops->tx_mon_filter_alloc(pdev)) {
			dp_mon_err("%pK: Memory allocation failed for tx monitor "
				   "filter", pdev);
			goto fail1;
		}
	}

	qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
	qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
	mon_pdev->monitor_configured = false;
	mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;

	TAILQ_INIT(&mon_pdev->neighbour_peers_list);
	mon_pdev->neighbour_peers_added = false;
	mon_pdev->monitor_configured = false;

	/* Monitor filter init */
	mon_pdev->mon_filter_mode = MON_FILTER_ALL;
	mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->fp_data_filter = FILTER_DATA_ALL;
	mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->mo_data_filter = FILTER_DATA_ALL;

	/*
	 * Initialize the ppdu tlv list
	 */
	TAILQ_INIT(&mon_pdev->ppdu_info_list);
	TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
	mon_pdev->list_depth = 0;
	mon_pdev->tlv_count = 0;

	/* initialize the cal client timer */
	dp_cal_client_attach(&mon_pdev->cal_client_ctx,
			     dp_pdev_to_cdp_pdev(pdev),
			     pdev->soc->osdev,
			     &dp_iterate_update_peer_list);
	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
		goto fail2;

	if (mon_ops->mon_lite_mon_alloc) {
		if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) {
			dp_mon_err("%pK: lite mon alloc failed", pdev);
			goto fail3;
		}
	}

	if (mon_ops->mon_rings_init) {
		if (mon_ops->mon_rings_init(pdev)) {
			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
			goto fail4;
		}
	}

	/* initialize sw monitor rx descriptors */
	if (mon_ops->rx_mon_desc_pool_init)
		mon_ops->rx_mon_desc_pool_init(pdev);

	/* allocate buffers and replenish the monitor RxDMA ring */
	if (mon_ops->rx_mon_buffers_alloc) {
		if (mon_ops->rx_mon_buffers_alloc(pdev)) {
			dp_mon_err("%pK: rx mon buffers alloc failed", pdev);
			goto fail5;
		}
	}

	/* attach monitor function */
	dp_monitor_tx_ppdu_stats_attach(pdev);

	/* mon pdev extended init */
	if (mon_ops->mon_pdev_ext_init)
		mon_ops->mon_pdev_ext_init(pdev);

	mon_pdev->is_dp_mon_pdev_initialized = true;

	return QDF_STATUS_SUCCESS;

fail5:
	if (mon_ops->rx_mon_desc_pool_deinit)
		mon_ops->rx_mon_desc_pool_deinit(pdev);

	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
fail4:
	if (mon_ops->mon_lite_mon_dealloc)
		mon_ops->mon_lite_mon_dealloc(pdev);
fail3:
	dp_htt_ppdu_stats_detach(pdev);
fail2:
	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	if (mon_ops->tx_mon_filter_dealloc)
		mon_ops->tx_mon_filter_dealloc(pdev);
fail1:
	dp_mon_filter_dealloc(mon_pdev);
fail0:
	qdf_mem_free(mon_pdev->invalid_mon_peer);
	return QDF_STATUS_E_FAILURE;
}

QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (!mon_pdev->is_dp_mon_pdev_initialized)
		return QDF_STATUS_SUCCESS;

	dp_mon_filters_reset(pdev);

	/* detach monitor function */
	dp_monitor_tx_ppdu_stats_detach(pdev);

	if (mon_ops->rx_mon_buffers_free)
		mon_ops->rx_mon_buffers_free(pdev);
	if (mon_ops->rx_mon_desc_pool_deinit)
		mon_ops->rx_mon_desc_pool_deinit(pdev);
	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
	dp_cal_client_detach(&mon_pdev->cal_client_ctx);
	if (mon_ops->mon_lite_mon_dealloc)
		mon_ops->mon_lite_mon_dealloc(pdev);
	dp_htt_ppdu_stats_detach(pdev);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	dp_neighbour_peers_detach(pdev);
	dp_pktlogmod_exit(pdev);
	if (mon_ops->tx_mon_filter_dealloc)
		mon_ops->tx_mon_filter_dealloc(pdev);
	if (mon_pdev->filter)
		dp_mon_filter_dealloc(mon_pdev);
	if (mon_pdev->invalid_mon_peer)
		qdf_mem_free(mon_pdev->invalid_mon_peer);
	mon_pdev->is_dp_mon_pdev_initialized = false;
	return QDF_STATUS_SUCCESS;
}
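
/*
 * Lifecycle note (descriptive only): the pdev-level monitor state is
 * expected to go through dp_mon_pdev_attach() -> dp_mon_pdev_init() on
 * the way up and dp_mon_pdev_deinit() -> dp_mon_pdev_detach() on the way
 * down; is_dp_mon_pdev_initialized guards against a deinit without a
 * matching init. The deinit above previously invoked mon_rings_deinit a
 * second time near the end; the duplicate call has been dropped.
 */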

QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_pdev *pdev = vdev->pdev;

	mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
	if (!mon_vdev) {
		dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_attach(mon_vdev);

	vdev->monitor_vdev = mon_vdev;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc);

	if (!mon_ops)
		return QDF_STATUS_E_FAILURE;

	if (!mon_vdev)
		return QDF_STATUS_E_FAILURE;

	if (pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_detach(mon_vdev);

	qdf_mem_free(mon_vdev);
	vdev->monitor_vdev = NULL;
	/*
	 * Set mvdev to NULL only if detach is called for the
	 * monitor/special vap.
	 */
	if (pdev->monitor_pdev->mvdev == vdev)
		pdev->monitor_pdev->mvdev = NULL;

	if (mon_ops->mon_lite_mon_vdev_delete)
		mon_ops->mon_lite_mon_vdev_delete(pdev, vdev);

	return QDF_STATUS_SUCCESS;
}

#if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
/**
 * dp_mon_peer_attach_notify() - Raise WDI event for peer create
 * @peer: DP Peer handle
 *
 * Return: none
 */
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_peer_cookie peer_cookie;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	peer_cookie.ctx = NULL;
	peer_cookie.pdev_id = pdev->pdev_id;
	peer_cookie.cookie = pdev->next_peer_cookie++;

	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc,
			     (void *)&peer_cookie,
			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);

	if (soc->peerstats_enabled) {
		if (!peer_cookie.ctx) {
			pdev->next_peer_cookie--;
			qdf_err("Failed to initialize peer rate stats");
			mon_peer->peerstats_ctx = NULL;
		} else {
			mon_peer->peerstats_ctx =
				(struct cdp_peer_rate_stats_ctx *)
				 peer_cookie.ctx;
		}
	}
}

/**
 * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy
 * @peer: DP Peer handle
 *
 * Return: none
 */
static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_peer_cookie peer_cookie;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;
	/* send peer destroy event to upper layer */
	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->peerstats_ctx;

	dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
			     soc,
			     (void *)&peer_cookie,
			     peer->peer_id,
			     WDI_NO_VAL,
			     pdev->pdev_id);

	mon_peer->peerstats_ctx = NULL;
}
#else
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
	peer->monitor_peer->peerstats_ctx = NULL;
}

static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
	peer->monitor_peer->peerstats_ctx = NULL;
}
#endif
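
/*
 * Note on the cookie handshake above (descriptive only): the
 * WDI_EVENT_PEER_CREATE subscriber is expected to fill peer_cookie.ctx
 * with its rate-stats context, so a NULL ctx on return means the upper
 * layer declined the peer and the pre-incremented pdev->next_peer_cookie
 * is rolled back.
 */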

#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer;
	struct dp_pdev *pdev;

	mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
	if (!mon_peer) {
		dp_mon_err("%pK: MONITOR peer allocation failed", peer);
		return QDF_STATUS_E_NOMEM;
	}

	peer->monitor_peer = mon_peer;
	pdev = peer->vdev->pdev;
	/*
	 * In tx_monitor mode, a filter may be set for an unassociated
	 * peer; when that peer becomes associated, the tx_cap_enabled
	 * flag needs to be updated to support peer filtering.
	 */
	dp_monitor_peer_tx_capture_filter_check(pdev, peer);

	DP_STATS_INIT(mon_peer);
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);

	dp_mon_peer_attach_notify(peer);

	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	if (!mon_peer)
		return QDF_STATUS_SUCCESS;

	dp_mon_peer_detach_notify(peer);

	qdf_mem_free(mon_peer);
	peer->monitor_peer = NULL;

	return QDF_STATUS_SUCCESS;
}

#ifndef DISABLE_MON_CONFIG
void dp_mon_register_intr_ops(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return;
	}
	if (mon_ops->mon_register_intr_ops)
		mon_ops->mon_register_intr_ops(soc);
}
#endif

struct cdp_peer_rate_stats_ctx *
dp_mon_peer_get_peerstats_ctx(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	if (mon_peer)
		return mon_peer->peerstats_ctx;
	else
		return NULL;
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
void dp_mon_peer_reset_stats(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = NULL;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return;

	DP_STATS_CLR(mon_peer);
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
}

void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
			   enum cdp_stat_update_type type)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_mon_peer_stats *mon_peer_stats;

	if (!mon_peer || !arg)
		return;

	mon_peer_stats = &mon_peer->stats;

	switch (type) {
	case UPDATE_PEER_STATS:
	{
		struct cdp_peer_stats *peer_stats =
				(struct cdp_peer_stats *)arg;
		DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
		break;
	}
	case UPDATE_VDEV_STATS:
	{
		struct cdp_vdev_stats *vdev_stats =
				(struct cdp_vdev_stats *)arg;
		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
		break;
	}
	default:
		dp_mon_err("Invalid stats_update_type");
	}
}

void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_mon_peer *mon_peer;
	struct dp_mon_peer_stats *mon_peer_stats;
	struct cdp_pdev_stats *pdev_stats;

	if (!pdev || !pdev->monitor_pdev)
		return;

	mon_peer = pdev->monitor_pdev->invalid_mon_peer;
	if (!mon_peer)
		return;

	mon_peer_stats = &mon_peer->stats;
	pdev_stats = &pdev->stats;
	DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats);
}

QDF_STATUS
dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type,
			    cdp_peer_stats_param_t *buf)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_mon_peer *mon_peer;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return QDF_STATUS_E_FAILURE;

	switch (type) {
	case cdp_peer_tx_rate:
		buf->tx_rate = mon_peer->stats.tx.tx_rate;
		break;
	case cdp_peer_tx_last_tx_rate:
		buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate;
		break;
	case cdp_peer_tx_ratecode:
		buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode;
		break;
	case cdp_peer_rx_rate:
		buf->rx_rate = mon_peer->stats.rx.rx_rate;
		break;
	case cdp_peer_rx_last_rx_rate:
		buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate;
		break;
	case cdp_peer_rx_ratecode:
		buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode;
		break;
	case cdp_peer_rx_avg_snr:
		buf->rx_avg_snr = mon_peer->stats.rx.avg_snr;
		break;
	case cdp_peer_rx_snr:
		buf->rx_snr = mon_peer->stats.rx.snr;
		break;
	default:
		dp_err("Invalid stats type requested");
		ret = QDF_STATUS_E_FAILURE;
	}

	return ret;
}
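
/*
 * Usage sketch (illustrative; report_snr() below is a hypothetical
 * consumer, not part of this driver):
 *
 *	cdp_peer_stats_param_t buf = {0};
 *
 *	if (dp_mon_peer_get_stats_param(peer, cdp_peer_rx_snr, &buf) ==
 *	    QDF_STATUS_SUCCESS)
 *		report_snr(buf.rx_snr);
 */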
#endif

void dp_mon_ops_register(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	uint32_t target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_ops_register_1_0(mon_soc);
		break;
	case TARGET_TYPE_QCN9224:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_ops_register_2_0(mon_soc);
#endif
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
}
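
/*
 * Descriptive note: monitor ops come in two generations, the legacy
 * "1.0" set for the targets listed above and the "2.0" set for
 * QCN9224-class hardware, matching the monitor_mode_v2 selection made
 * in dp_mon_soc_cfg_init().
 */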

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
void dp_mon_ops_free(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;

	if (cdp_mon_ops)
		qdf_mem_free(cdp_mon_ops);

	if (mon_ops)
		qdf_mem_free(mon_ops);
}
#else
void dp_mon_ops_free(struct dp_soc *soc)
{
}
#endif

void dp_mon_cdp_ops_register(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	uint32_t target_type;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_cdp_ops_register_1_0(ops);
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
			dp_config_for_nac_rssi;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
			dp_vdev_get_neighbour_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
			dp_update_filter_neighbour_peers;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
		break;
	case TARGET_TYPE_QCN9224:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_cdp_ops_register_2_0(ops);
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
			dp_lite_mon_config_nac_rssi_peer;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
			dp_lite_mon_get_nac_peer_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
			dp_lite_mon_config_nac_peer;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
#endif
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	ops->cfr_ops->txrx_cfr_filter = dp_cfr_filter;
#endif
	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
		dp_get_mon_vdev_from_pdev_wifi3;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
#endif
	ops->ctrl_ops->enable_peer_based_pktlog =
		dp_enable_peer_based_pktlog;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
		dp_peer_update_pkt_capture_params;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef QCA_ENHANCED_STATS_SUPPORT
	ops->host_stats_ops->txrx_enable_enhanced_stats =
		dp_enable_enhanced_stats;
	ops->host_stats_ops->txrx_disable_enhanced_stats =
		dp_disable_enhanced_stats;
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
#endif
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
		dp_get_scan_spcl_vap_stats;
#endif
}

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	if (ops->mon_ops) {
		qdf_mem_free(ops->mon_ops);
		ops->mon_ops = NULL;
	}
}
#else
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	ops->mon_ops = NULL;
}
#endif

void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	dp_mon_cdp_mon_ops_deregister(ops);

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	ops->cfr_ops->txrx_cfr_filter = NULL;
#endif
	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = NULL;
	ops->misc_ops->pkt_log_con_service = NULL;
	ops->misc_ops->pkt_log_exit = NULL;
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL;
	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef FEATURE_PERPKT_INFO
	ops->host_stats_ops->txrx_enable_enhanced_stats = NULL;
	ops->host_stats_ops->txrx_disable_enhanced_stats = NULL;
#endif /* FEATURE_PERPKT_INFO */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = NULL;
#endif
}

void dp_mon_intr_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->mon_rx_process = NULL;
}

void dp_mon_feature_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops) {
		dp_err("mon_ops is NULL");
		return;
	}

	mon_ops->mon_config_debug_sniffer = NULL;
	mon_ops->mon_peer_tx_init = NULL;
	mon_ops->mon_peer_tx_cleanup = NULL;
	mon_ops->mon_htt_ppdu_stats_attach = NULL;
	mon_ops->mon_htt_ppdu_stats_detach = NULL;
	mon_ops->mon_print_pdev_rx_mon_stats = NULL;
	mon_ops->mon_set_bsscolor = NULL;
	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
	mon_ops->mon_pdev_get_filter_non_data = NULL;
	mon_ops->mon_neighbour_peer_add_ast = NULL;
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_ppdu_stats_attach = NULL;
	mon_ops->mon_tx_ppdu_stats_detach = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_peer_tx_capture_filter_check = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = NULL;
#endif
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
	mon_ops->mon_ppdu_stats_ind_handler = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = NULL;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = NULL;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = NULL;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = NULL;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = NULL;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode = NULL;
	mon_ops->mon_filter_reset_mcopy_mode = NULL;
	mon_ops->mon_mcopy_check_deliver = NULL;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats = NULL;
	mon_ops->mon_tx_enable_enhanced_stats = NULL;
	mon_ops->mon_tx_disable_enhanced_stats = NULL;
	mon_ops->mon_ppdu_desc_deliver = NULL;
	mon_ops->mon_ppdu_desc_notify = NULL;
	mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
#ifdef WLAN_FEATURE_11BE
	mon_ops->mon_tx_stats_update = NULL;
#endif
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	mon_ops->mon_filter_setup_smart_monitor = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
#ifdef BE_PKTLOG_SUPPORT
	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
#endif
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = NULL;
#endif
	mon_ops->rx_hdr_length_set = NULL;
	mon_ops->rx_packet_length_set = NULL;
	mon_ops->rx_wmask_subscribe = NULL;
	mon_ops->rx_enable_mpdu_logging = NULL;
	mon_ops->mon_neighbour_peers_detach = NULL;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
	mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = NULL;
	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
	mon_ops->mon_rx_populate_ppdu_info = NULL;
#endif
}

QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
	if (!mon_soc) {
		dp_mon_err("%pK: mem allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	/* register monitor ops */
	soc->monitor_soc = mon_soc;
	dp_mon_ops_register(soc);
	dp_mon_register_intr_ops(soc);
	dp_mon_cdp_ops_register(soc);
	dp_mon_register_feature_ops(soc);
	return QDF_STATUS_SUCCESS;
}
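
/*
 * Ordering note (descriptive only): soc->monitor_soc is populated before
 * the dp_mon_*_register() calls above run, since they reach the ops
 * tables through soc->monitor_soc; dp_mon_soc_detach() below tears the
 * same state down and clears the pointer before freeing.
 */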

QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = soc->monitor_soc;
	dp_monitor_vdev_timer_deinit(soc);
	dp_mon_cdp_ops_deregister(soc);
	soc->monitor_soc = NULL;
	qdf_mem_free(mon_soc);
	return QDF_STATUS_SUCCESS;
}