
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include <dp_rx_mon.h>
#include <dp_internal.h>
#include "htt_ppdu_stats.h"
#include "dp_cal_client_api.h"
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif
#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif

#define DP_INTR_POLL_TIMER_MS 5
#define INVALID_FREE_BUFF 0xffffffff

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef QCA_UNDECODED_METADATA_SUPPORT
#define MAX_STRING_LEN_PER_FIELD 6
#define DP_UNDECODED_ERR_LENGTH (MAX_STRING_LEN_PER_FIELD * CDP_PHYRX_ERR_MAX)
#endif
#ifdef QCA_MCOPY_SUPPORT
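/**
 * dp_pdev_disable_mcopy_code() - clear mcopy state on the monitor pdev
 * @pdev: Datapath PDEV handle
 *
 * Marks mcopy mode as disabled and drops the monitor vdev reference.
 *
 * Return: void
 */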
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    mon_pdev->mcopy_mode = M_COPY_DISABLED;
    mon_pdev->mvdev = NULL;
}
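
/**
 * dp_reset_mcopy_mode() - disable mcopy mode and reset its filters
 * @pdev: Datapath PDEV handle
 *
 * Re-enables full monitor mode, clears the mcopy configuration and
 * pushes the updated filter settings to the target.
 *
 * Return: void
 */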
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
    struct cdp_mon_ops *cdp_ops;

    if (mon_pdev->mcopy_mode) {
        cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
        if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
            cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
                                              DP_FULL_MON_ENABLE);
        dp_pdev_disable_mcopy_code(pdev);
        dp_mon_filter_reset_mcopy_mode(pdev);
        status = dp_mon_filter_update(pdev);
        if (status != QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Failed to reset M_copy mode filters"));
        }
        mon_pdev->monitor_configured = false;
    }
}
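
/**
 * dp_config_mcopy_mode() - enable mcopy (mirror copy) mode
 * @pdev: Datapath PDEV handle
 * @val: mcopy mode variant to configure (2 or 4)
 *
 * Fails with QDF_STATUS_E_RESOURCES if a monitor vdev is already up.
 *
 * Return: QDF_STATUS
 */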
static QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
    struct dp_mon_ops *mon_ops;
    struct cdp_mon_ops *cdp_ops;

    if (mon_pdev->mvdev)
        return QDF_STATUS_E_RESOURCES;

    mon_pdev->mcopy_mode = val;
    mon_pdev->tx_sniffer_enable = 0;
    mon_pdev->monitor_configured = true;

    mon_ops = dp_mon_ops_get(pdev->soc);
    if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx)) {
        if (mon_ops && mon_ops->mon_vdev_set_monitor_mode_rings)
            mon_ops->mon_vdev_set_monitor_mode_rings(pdev, true);
    }

    /*
     * Setup the M copy mode filter.
     */
    cdp_ops = dp_mon_cdp_ops_get(pdev->soc);
    if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
        cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
                                          DP_FULL_MON_ENABLE);
    dp_mon_filter_setup_mcopy_mode(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Failed to set M_copy mode filters"));
        dp_mon_filter_reset_mcopy_mode(pdev);
        dp_pdev_disable_mcopy_code(pdev);
        return status;
    }

    if (!mon_pdev->pktlog_ppdu_stats)
        dp_h2t_cfg_stats_msg_send(pdev,
                                  DP_PPDU_STATS_CFG_SNIFFER,
                                  pdev->pdev_id);

    return status;
}
#else
static inline void
dp_reset_mcopy_mode(struct dp_pdev *pdev)
{
}

static inline QDF_STATUS
dp_config_mcopy_mode(struct dp_pdev *pdev, int val)
{
    return QDF_STATUS_E_INVAL;
}
#endif /* QCA_MCOPY_SUPPORT */
#ifdef QCA_UNDECODED_METADATA_SUPPORT
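/**
 * dp_reset_undecoded_metadata_capture() - disable undecoded metadata capture
 * @pdev: Datapath PDEV handle
 *
 * Resets the undecoded metadata filters and pushes the update to the target.
 *
 * Return: QDF_STATUS
 */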
static QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    if (mon_pdev->undecoded_metadata_capture) {
        dp_mon_filter_reset_undecoded_metadata_mode(pdev);
        status = dp_mon_filter_update(pdev);
        if (status != QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Undecoded capture filter reset failed"));
        }
    }
    mon_pdev->undecoded_metadata_capture = 0;

    return status;
}
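
/**
 * dp_enable_undecoded_metadata_capture() - enable undecoded metadata capture
 * @pdev: Datapath PDEV handle
 * @val: undecoded capture configuration value
 *
 * Requires a monitor vdev to be up on this pdev.
 *
 * Return: QDF_STATUS
 */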
static QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    if (!mon_pdev->mvdev) {
        qdf_err("monitor vdev is NULL");
        return QDF_STATUS_E_RESOURCES;
    }

    mon_pdev->undecoded_metadata_capture = val;
    mon_pdev->monitor_configured = true;

    /* Setup the undecoded metadata capture mode filter. */
    dp_mon_filter_setup_undecoded_metadata_mode(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Failed to set Undecoded capture filters"));
        dp_mon_filter_reset_undecoded_metadata_mode(pdev);
        return status;
    }

    return status;
}
#else
static inline QDF_STATUS
dp_reset_undecoded_metadata_capture(struct dp_pdev *pdev)
{
    return QDF_STATUS_E_INVAL;
}

static inline QDF_STATUS
dp_enable_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
    return QDF_STATUS_E_INVAL;
}
#endif /* QCA_UNDECODED_METADATA_SUPPORT */
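
/**
 * dp_reset_monitor_mode() - disable monitor mode and reset its filters
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @special_monitor: Flag to denote if it is smart monitor mode
 *
 * Used by lite monitor, smart monitor and plain monitor mode to reset
 * filters and disable the currently active mode under mon_lock.
 *
 * Return: QDF_STATUS
 */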
QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
                                 uint8_t pdev_id,
                                 uint8_t special_monitor)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                                           pdev_id);
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev;
    struct cdp_mon_ops *cdp_ops;

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    mon_pdev = pdev->monitor_pdev;

    qdf_spin_lock_bh(&mon_pdev->mon_lock);

    cdp_ops = dp_mon_cdp_ops_get(soc);
    if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
        cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
                                          DP_FULL_MON_DISABLE);
    mon_pdev->mvdev = NULL;

    /*
     * Lite monitor mode, smart monitor mode and monitor
     * mode all use this API to reset filters and disable
     * the mode.
     */
    if (mon_pdev->mcopy_mode) {
#if defined(QCA_MCOPY_SUPPORT)
        dp_pdev_disable_mcopy_code(pdev);
        dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* QCA_MCOPY_SUPPORT */
    } else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
        dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
        /* for mon 2.0 we make use of lite mon to
         * set filters for smart monitor use case.
         */
        dp_monitor_lite_mon_disable_rx(pdev);
    } else if (mon_pdev->undecoded_metadata_capture) {
#ifdef QCA_UNDECODED_METADATA_SUPPORT
        dp_reset_undecoded_metadata_capture(pdev);
#endif
    } else {
        dp_mon_filter_reset_mon_mode(pdev);
    }

    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
                           soc);
    }

    mon_pdev->monitor_configured = false;

    qdf_spin_unlock_bh(&mon_pdev->mon_lock);
    return QDF_STATUS_SUCCESS;
}
#ifdef QCA_ADVANCE_MON_FILTER_SUPPORT
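/**
 * dp_pdev_set_advance_monitor_filter() - set advanced monitor filter values
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @filter_val: FP/MO filter settings to apply
 *
 * Caches the filter mode and the FP/MO mgmt, ctrl and data filters in the
 * monitor pdev, then pushes the updated monitor mode filter to the target.
 *
 * Return: QDF_STATUS
 */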
QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                   struct cdp_monitor_filter *filter_val)
{
    /* Many monitor VAPs can exist in a system but only one can be up at
     * any time.
     */
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_vdev *vdev;
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                                           pdev_id);
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev;

    if (!pdev || !pdev->monitor_pdev)
        return QDF_STATUS_E_FAILURE;

    mon_pdev = pdev->monitor_pdev;
    vdev = mon_pdev->mvdev;

    if (!vdev)
        return QDF_STATUS_E_FAILURE;

    QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
              "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
              pdev, pdev_id, soc, vdev);

    /* Check if the current pdev's monitor_vdev exists */
    if (!mon_pdev->mvdev) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "vdev=%pK", vdev);
        qdf_assert(vdev);
    }

    /* update filter mode, type in pdev structure */
    mon_pdev->mon_filter_mode = filter_val->mode;
    mon_pdev->fp_mgmt_filter = filter_val->fp_mgmt;
    mon_pdev->fp_ctrl_filter = filter_val->fp_ctrl;
    mon_pdev->fp_data_filter = filter_val->fp_data;
    mon_pdev->mo_mgmt_filter = filter_val->mo_mgmt;
    mon_pdev->mo_ctrl_filter = filter_val->mo_ctrl;
    mon_pdev->mo_data_filter = filter_val->mo_data;

    dp_mon_filter_setup_mon_mode(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
                           soc);
        dp_mon_filter_reset_mon_mode(pdev);
    }

    return status;
}
#endif
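
/**
 * dp_deliver_tx_mgmt() - deliver a tx mgmt frame to the monitor path
 * @cdp_soc: Datapath soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @nbuf: mgmt frame buffer
 *
 * Return: QDF_STATUS
 */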
QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
    struct dp_pdev *pdev =
        dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
                                           pdev_id);

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    dp_deliver_mgmt_frm(pdev, nbuf);

    return QDF_STATUS_SUCCESS;
}
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
/**
 * dp_scan_spcl_vap_stats_attach() - alloc spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: 0 on success, not 0 on failure
 */
static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
    mon_vdev->scan_spcl_vap_stats =
        qdf_mem_malloc(sizeof(struct cdp_scan_spcl_vap_stats));

    if (!mon_vdev->scan_spcl_vap_stats) {
        dp_mon_err("scan spcl vap stats attach fail");
        return QDF_STATUS_E_NOMEM;
    }

    return QDF_STATUS_SUCCESS;
}

/**
 * dp_scan_spcl_vap_stats_detach() - free spcl vap stats struct
 * @mon_vdev: Datapath mon VDEV handle
 *
 * Return: void
 */
static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
    if (mon_vdev->scan_spcl_vap_stats) {
        qdf_mem_free(mon_vdev->scan_spcl_vap_stats);
        mon_vdev->scan_spcl_vap_stats = NULL;
    }
}

/**
 * dp_reset_scan_spcl_vap_stats() - reset spcl vap rx stats
 * @vdev: Datapath VDEV handle
 *
 * Return: void
 */
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
    struct dp_mon_vdev *mon_vdev;
    struct dp_mon_pdev *mon_pdev;

    mon_pdev = vdev->pdev->monitor_pdev;
    if (!mon_pdev || !mon_pdev->reset_scan_spcl_vap_stats_enable)
        return;

    mon_vdev = vdev->monitor_vdev;
    if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats)
        return;

    qdf_mem_zero(mon_vdev->scan_spcl_vap_stats,
                 sizeof(struct cdp_scan_spcl_vap_stats));
}

/**
 * dp_get_scan_spcl_vap_stats() - get spcl vap rx stats
 * @soc_hdl: Datapath soc handle
 * @vdev_id: vdev id
 * @stats: structure to hold spcl vap stats
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_get_scan_spcl_vap_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                           struct cdp_scan_spcl_vap_stats *stats)
{
    struct dp_mon_vdev *mon_vdev = NULL;
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                 DP_MOD_ID_CDP);

    if (!vdev || !stats) {
        if (vdev)
            dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
        return QDF_STATUS_E_INVAL;
    }

    mon_vdev = vdev->monitor_vdev;
    if (!mon_vdev || !mon_vdev->scan_spcl_vap_stats) {
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
        return QDF_STATUS_E_INVAL;
    }

    qdf_mem_copy(stats, mon_vdev->scan_spcl_vap_stats,
                 sizeof(struct cdp_scan_spcl_vap_stats));

    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
    return QDF_STATUS_SUCCESS;
}
#else
static inline void
dp_reset_scan_spcl_vap_stats(struct dp_vdev *vdev)
{
}

static inline QDF_STATUS
dp_scan_spcl_vap_stats_attach(struct dp_mon_vdev *mon_vdev)
{
    return QDF_STATUS_SUCCESS;
}

static inline void
dp_scan_spcl_vap_stats_detach(struct dp_mon_vdev *mon_vdev)
{
}
#endif
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @dp_soc: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @special_monitor: Flag to denote if it is smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
                                           uint8_t vdev_id,
                                           uint8_t special_monitor)
{
    struct dp_soc *soc = (struct dp_soc *)dp_soc;
    struct dp_pdev *pdev;
    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                 DP_MOD_ID_CDP);
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev;
    struct cdp_mon_ops *cdp_ops;

    if (!vdev)
        return QDF_STATUS_E_FAILURE;

    pdev = vdev->pdev;

    if (!pdev || !pdev->monitor_pdev) {
        /* release the vdev reference taken above before bailing out */
        status = QDF_STATUS_E_FAILURE;
        goto fail;
    }

    mon_pdev = pdev->monitor_pdev;
    mon_pdev->mvdev = vdev;

    QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
              "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
              pdev, pdev->pdev_id, pdev->soc, vdev);

    /*
     * do not configure monitor buf ring and filter for smart and
     * lite monitor
     * for smart monitor filters are added along with first NAC
     * for lite monitor required configuration done through
     * dp_set_pdev_param
     */
    if (special_monitor) {
        status = QDF_STATUS_SUCCESS;
        goto fail;
    }

    if (mon_pdev->scan_spcl_vap_configured)
        dp_reset_scan_spcl_vap_stats(vdev);

    /* Check if the current pdev's monitor_vdev exists */
    if (mon_pdev->monitor_configured) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                  "monitor vap already created vdev=%pK\n", vdev);
        status = QDF_STATUS_E_RESOURCES;
        goto fail;
    }

    mon_pdev->monitor_configured = true;

    /* disable lite mon if configured, monitor vap takes
     * priority over lite mon when its created. Lite mon
     * can be configured later again.
     */
    dp_monitor_lite_mon_disable_rx(pdev);

    cdp_ops = dp_mon_cdp_ops_get(soc);
    if (cdp_ops && cdp_ops->soc_config_full_mon_mode)
        cdp_ops->soc_config_full_mon_mode((struct cdp_pdev *)pdev,
                                          DP_FULL_MON_ENABLE);
    dp_mon_filter_setup_mon_mode(pdev);
    status = dp_mon_filter_update(pdev);
    if (status != QDF_STATUS_SUCCESS) {
        dp_cdp_err("%pK: Failed to set monitor filters", soc);
        dp_mon_filter_reset_mon_mode(pdev);
        mon_pdev->monitor_configured = false;
        mon_pdev->mvdev = NULL;
    }

fail:
    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
    return status;
}
#ifdef QCA_TX_CAPTURE_SUPPORT
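/**
 * dp_config_tx_capture_mode() - enable tx sniffer mode
 * @pdev: Datapath PDEV handle
 *
 * Enables tx sniffing and, if ppdu stats are not already being logged,
 * requests sniffer ppdu stats from the target.
 *
 * Return: QDF_STATUS
 */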
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    mon_pdev->tx_sniffer_enable = 1;
    mon_pdev->monitor_configured = false;

    if (!mon_pdev->pktlog_ppdu_stats)
        dp_h2t_cfg_stats_msg_send(pdev,
                                  DP_PPDU_STATS_CFG_SNIFFER,
                                  pdev->pdev_id);

    return QDF_STATUS_SUCCESS;
}
#else
#ifdef QCA_MCOPY_SUPPORT
static QDF_STATUS
dp_config_tx_capture_mode(struct dp_pdev *pdev)
{
    return QDF_STATUS_E_INVAL;
}
#endif
#endif
#if defined(QCA_MCOPY_SUPPORT) || defined(QCA_TX_CAPTURE_SUPPORT)
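/**
 * dp_config_debug_sniffer() - configure the debug sniffer mode
 * @pdev: Datapath PDEV handle
 * @val: 0 - disable, 1 - tx capture, 2/4 - mcopy mode variants
 *
 * Mirror copy mode cannot co-exist with the other monitor modes, so any
 * active mcopy configuration is reset before the requested mode is applied.
 *
 * Return: QDF_STATUS
 */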
QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    /*
     * Note: The mirror copy mode cannot co-exist with any other
     * monitor modes. Hence disabling the filter for this mode will
     * reset the monitor destination ring filters.
     */
    dp_reset_mcopy_mode(pdev);
    switch (val) {
    case 0:
        mon_pdev->tx_sniffer_enable = 0;
        mon_pdev->monitor_configured = false;

        /*
         * We don't need to reset the Rx monitor status ring or call
         * the API dp_ppdu_ring_reset() if all debug sniffer modes are
         * disabled. The Rx monitor status ring will be disabled when
         * the last mode using the monitor status ring gets disabled.
         */
        if (!mon_pdev->pktlog_ppdu_stats &&
            !mon_pdev->enhanced_stats_en &&
            !mon_pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
        } else if (mon_pdev->enhanced_stats_en &&
                   !mon_pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_ENH_STATS,
                                      pdev->pdev_id);
        } else if (!mon_pdev->enhanced_stats_en &&
                   mon_pdev->bpr_enable) {
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_BPR_ENH,
                                      pdev->pdev_id);
        } else {
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_BPR,
                                      pdev->pdev_id);
        }
        break;
    case 1:
        status = dp_config_tx_capture_mode(pdev);
        break;
    case 2:
    case 4:
        status = dp_config_mcopy_mode(pdev, val);
        break;
    default:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Invalid value, mode not supported");
        status = QDF_STATUS_E_INVAL;
        break;
    }
    return status;
}
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
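/**
 * dp_mon_config_undecoded_metadata_capture() - enable/disable undecoded
 * metadata capture
 * @pdev: Datapath PDEV handle
 * @val: non-zero to enable, zero to disable
 *
 * Requires either a monitor vdev or a special scan vap to be configured.
 *
 * Return: QDF_STATUS
 */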
QDF_STATUS
dp_mon_config_undecoded_metadata_capture(struct dp_pdev *pdev, int val)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    if (!mon_pdev->mvdev && !mon_pdev->scan_spcl_vap_configured) {
        qdf_err("No monitor or Special vap, undecoded capture not supported");
        return QDF_STATUS_E_RESOURCES;
    }

    if (val)
        status = dp_enable_undecoded_metadata_capture(pdev, val);
    else
        status = dp_reset_undecoded_metadata_capture(pdev);

    return status;
}
#endif
/**
 * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
 *                                 ring based on target
 * @soc: soc handle
 * @mac_for_pdev: WIN- pdev_id, MCL- mac id
 * @pdev: physical device handle
 * @ring_num: mac id
 * @htt_tlv_filter: tlv filter
 *
 * Return: zero on success, non-zero on failure
 */
static inline QDF_STATUS
dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
                            struct dp_pdev *pdev, uint8_t ring_num,
                            struct htt_rx_ring_tlv_filter htt_tlv_filter)
{
    QDF_STATUS status;

    if (soc->wlan_cfg_ctx->rxdma1_enable)
        status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
                                     soc->rxdma_mon_buf_ring[ring_num]
                                     .hal_srng,
                                     RXDMA_MONITOR_BUF,
                                     RX_MONITOR_BUFFER_SIZE,
                                     &htt_tlv_filter);
    else
        status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
                                     pdev->rx_mac_buf_ring[ring_num]
                                     .hal_srng,
                                     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
                                     &htt_tlv_filter);

    return status;
}
/**
 * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 *
 * Return: virtual interface id
 */
static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
                                               uint8_t pdev_id)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
    struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

    if (qdf_unlikely(!pdev || !pdev->monitor_pdev ||
                     !pdev->monitor_pdev->mvdev))
        return -EINVAL;

    return pdev->monitor_pdev->mvdev->vdev_id;
}
#if defined(QCA_TX_CAPTURE_SUPPORT) || defined(QCA_ENHANCED_STATS_SUPPORT)
#ifndef WLAN_TX_PKT_CAPTURE_ENH
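/**
 * dp_deliver_mgmt_frm() - deliver a mgmt frame over WDI or free it
 * @pdev: Datapath PDEV handle
 * @nbuf: mgmt frame buffer
 *
 * Hands the frame to WDI_EVENT_TX_MGMT_CTRL subscribers when tx sniffer
 * or mcopy mode is active; otherwise frees it unless BPR is enabled.
 *
 * Return: void
 */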
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode) {
        dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
                             nbuf, HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);
    } else {
        if (!mon_pdev->bpr_enable)
            qdf_nbuf_free(nbuf);
    }
}
#endif
#endif
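
/**
 * dp_htt_ppdu_stats_attach() - allocate the ppdu stats tlv buffer
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS
 */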
QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    mon_pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
    if (!mon_pdev->ppdu_tlv_buf) {
        QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
        return QDF_STATUS_E_NOMEM;
    }

    return QDF_STATUS_SUCCESS;
}
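
/**
 * dp_htt_ppdu_stats_detach() - free ppdu stats resources
 * @pdev: Datapath PDEV handle
 *
 * Drains the pending and sched-completion ppdu info lists and frees the
 * ppdu stats tlv buffer.
 *
 * Return: void
 */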
void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
{
    struct ppdu_info *ppdu_info, *ppdu_info_next;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

    TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
                       ppdu_info_list_elem, ppdu_info_next) {
        if (!ppdu_info)
            break;
        TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
                     ppdu_info, ppdu_info_list_elem);
        mon_pdev->list_depth--;
        qdf_assert_always(ppdu_info->nbuf);
        qdf_nbuf_free(ppdu_info->nbuf);
        qdf_mem_free(ppdu_info);
    }

    TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->sched_comp_ppdu_list,
                       ppdu_info_list_elem, ppdu_info_next) {
        if (!ppdu_info)
            break;
        TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list,
                     ppdu_info, ppdu_info_list_elem);
        mon_pdev->sched_comp_list_depth--;
        qdf_assert_always(ppdu_info->nbuf);
        qdf_nbuf_free(ppdu_info->nbuf);
        qdf_mem_free(ppdu_info);
    }

    if (mon_pdev->ppdu_tlv_buf)
        qdf_mem_free(mon_pdev->ppdu_tlv_buf);
}
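
/**
 * dp_pdev_get_rx_mon_stats() - copy out the rx monitor stats of a pdev
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @stats: caller buffer for the stats snapshot
 *
 * Return: QDF_STATUS
 */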
QDF_STATUS dp_pdev_get_rx_mon_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                    struct cdp_pdev_mon_stats *stats)
{
    struct dp_soc *soc = (struct dp_soc *)soc_hdl;
    struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
    struct dp_mon_pdev *mon_pdev;

    if (!pdev)
        return QDF_STATUS_E_FAILURE;

    mon_pdev = pdev->monitor_pdev;
    if (!mon_pdev)
        return QDF_STATUS_E_FAILURE;

    qdf_mem_copy(stats, &mon_pdev->rx_mon_stats,
                 sizeof(struct cdp_pdev_mon_stats));

    return QDF_STATUS_SUCCESS;
}
#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_pdev_get_undecoded_capture_stats() - Get undecoded metadata captured
 * monitor pdev stats
 * @mon_pdev: Monitor PDEV handle
 * @rx_mon_stats: Monitor pdev status/destination ring stats
 *
 * Return: None
 */
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
                                    struct cdp_pdev_mon_stats *rx_mon_stats)
{
    char undecoded_error[DP_UNDECODED_ERR_LENGTH];
    uint8_t index = 0, i;

    DP_PRINT_STATS("Rx Undecoded Frame count:%d",
                   rx_mon_stats->rx_undecoded_count);
    index = 0;
    for (i = 0; i < (CDP_PHYRX_ERR_MAX); i++) {
        index += qdf_snprint(&undecoded_error[index],
                             DP_UNDECODED_ERR_LENGTH - index,
                             " %d", rx_mon_stats->rx_undecoded_error[i]);
    }
    DP_PRINT_STATS("Undecoded Error (0-63):%s", undecoded_error);
}
#else
static inline void
dp_pdev_get_undecoded_capture_stats(struct dp_mon_pdev *mon_pdev,
                                    struct cdp_pdev_mon_stats *rx_mon_stats)
{
}
#endif
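
/**
 * dp_print_pdev_rx_mon_stats() - print the rx monitor stats of a pdev
 * @pdev: Datapath PDEV handle
 *
 * Dumps the status/destination ring counters and a snapshot of the
 * ppdu id history taken under mon_lock.
 *
 * Return: void
 */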
void
dp_print_pdev_rx_mon_stats(struct dp_pdev *pdev)
{
    struct cdp_pdev_mon_stats *rx_mon_stats;
    uint32_t *stat_ring_ppdu_ids;
    uint32_t *dest_ring_ppdu_ids;
    int i, idx;
    struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
    struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;

    rx_mon_stats = &mon_pdev->rx_mon_stats;

    DP_PRINT_STATS("PDEV Rx Monitor Stats:\n");

    DP_PRINT_STATS("status_ppdu_compl_cnt = %d",
                   rx_mon_stats->status_ppdu_compl);
    DP_PRINT_STATS("status_ppdu_start_cnt = %d",
                   rx_mon_stats->status_ppdu_start);
    DP_PRINT_STATS("status_ppdu_end_cnt = %d",
                   rx_mon_stats->status_ppdu_end);
    DP_PRINT_STATS("status_ppdu_start_mis_cnt = %d",
                   rx_mon_stats->status_ppdu_start_mis);
    DP_PRINT_STATS("status_ppdu_end_mis_cnt = %d",
                   rx_mon_stats->status_ppdu_end_mis);
    DP_PRINT_STATS("status_ppdu_done_cnt = %d",
                   rx_mon_stats->status_ppdu_done);
    DP_PRINT_STATS("dest_ppdu_done_cnt = %d",
                   rx_mon_stats->dest_ppdu_done);
    DP_PRINT_STATS("dest_mpdu_done_cnt = %d",
                   rx_mon_stats->dest_mpdu_done);
    DP_PRINT_STATS("tlv_tag_status_err_cnt = %u",
                   rx_mon_stats->tlv_tag_status_err);
    DP_PRINT_STATS("mon status DMA not done WAR count= %u",
                   rx_mon_stats->status_buf_done_war);
    DP_PRINT_STATS("dest_mpdu_drop_cnt = %d",
                   rx_mon_stats->dest_mpdu_drop);
    DP_PRINT_STATS("dup_mon_linkdesc_cnt = %d",
                   rx_mon_stats->dup_mon_linkdesc_cnt);
    DP_PRINT_STATS("dup_mon_buf_cnt = %d",
                   rx_mon_stats->dup_mon_buf_cnt);
    DP_PRINT_STATS("mon_rx_buf_reaped = %u",
                   rx_mon_stats->mon_rx_bufs_reaped_dest);
    DP_PRINT_STATS("mon_rx_buf_replenished = %u",
                   rx_mon_stats->mon_rx_bufs_replenished_dest);
    DP_PRINT_STATS("ppdu_id_mismatch = %u",
                   rx_mon_stats->ppdu_id_mismatch);
    DP_PRINT_STATS("mpdu_ppdu_id_match_cnt = %d",
                   rx_mon_stats->ppdu_id_match);
    DP_PRINT_STATS("ppdus dropped frm status ring = %d",
                   rx_mon_stats->status_ppdu_drop);
    DP_PRINT_STATS("ppdus dropped frm dest ring = %d",
                   rx_mon_stats->dest_ppdu_drop);
    DP_PRINT_STATS("mpdu_ppdu_id_mismatch_drop = %u",
                   rx_mon_stats->mpdu_ppdu_id_mismatch_drop);
    DP_PRINT_STATS("mpdu_decap_type_invalid = %u",
                   rx_mon_stats->mpdu_decap_type_invalid);

    stat_ring_ppdu_ids =
        (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);
    dest_ring_ppdu_ids =
        (uint32_t *)qdf_mem_malloc(sizeof(uint32_t) * MAX_PPDU_ID_HIST);

    if (!stat_ring_ppdu_ids || !dest_ring_ppdu_ids) {
        DP_PRINT_STATS("Unable to allocate ppdu id hist mem\n");
        /* bail out rather than copying into NULL buffers below */
        qdf_mem_free(stat_ring_ppdu_ids);
        qdf_mem_free(dest_ring_ppdu_ids);
        return;
    }

    qdf_spin_lock_bh(&mon_pdev->mon_lock);
    idx = rx_mon_stats->ppdu_id_hist_idx;
    qdf_mem_copy(stat_ring_ppdu_ids,
                 rx_mon_stats->stat_ring_ppdu_id_hist,
                 sizeof(uint32_t) * MAX_PPDU_ID_HIST);
    qdf_mem_copy(dest_ring_ppdu_ids,
                 rx_mon_stats->dest_ring_ppdu_id_hist,
                 sizeof(uint32_t) * MAX_PPDU_ID_HIST);
    qdf_spin_unlock_bh(&mon_pdev->mon_lock);

    DP_PRINT_STATS("PPDU Id history:");
    DP_PRINT_STATS("stat_ring_ppdu_ids\t dest_ring_ppdu_ids");
    for (i = 0; i < MAX_PPDU_ID_HIST; i++) {
        idx = (idx + 1) & (MAX_PPDU_ID_HIST - 1);
        /* print from the snapshot taken under mon_lock above */
        DP_PRINT_STATS("%*u\t%*u", 16,
                       stat_ring_ppdu_ids[idx], 16,
                       dest_ring_ppdu_ids[idx]);
    }
    qdf_mem_free(stat_ring_ppdu_ids);
    qdf_mem_free(dest_ring_ppdu_ids);
    DP_PRINT_STATS("mon_rx_dest_stuck = %d",
                   rx_mon_stats->mon_rx_dest_stuck);
    DP_PRINT_STATS("rx_hdr_not_received = %d",
                   rx_mon_stats->rx_hdr_not_received);
    DP_PRINT_STATS("parent_buf_alloc = %d",
                   rx_mon_stats->parent_buf_alloc);
    DP_PRINT_STATS("parent_buf_free = %d",
                   rx_mon_stats->parent_buf_free);
    DP_PRINT_STATS("mpdus_buf_to_stack = %d",
                   rx_mon_stats->mpdus_buf_to_stack);
    DP_PRINT_STATS("frag_alloc = %d",
                   mon_soc->stats.frag_alloc);
    DP_PRINT_STATS("frag_free = %d",
                   mon_soc->stats.frag_free);
    DP_PRINT_STATS("status_buf_count = %d",
                   rx_mon_stats->status_buf_count);
    DP_PRINT_STATS("pkt_buf_count = %d",
                   rx_mon_stats->pkt_buf_count);
    dp_pdev_get_undecoded_capture_stats(mon_pdev, rx_mon_stats);
}
#ifdef QCA_SUPPORT_BPR
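/**
 * dp_set_bpr_enable() - enable/disable the bpr feature
 * @pdev: Datapath PDEV handle
 * @val: bpr enable value
 *
 * Return: QDF_STATUS
 */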
QDF_STATUS
dp_set_bpr_enable(struct dp_pdev *pdev, int val)
{
    struct dp_mon_ops *mon_ops;

    mon_ops = dp_mon_ops_get(pdev->soc);
    if (mon_ops && mon_ops->mon_set_bpr_enable)
        return mon_ops->mon_set_bpr_enable(pdev, val);

    return QDF_STATUS_E_FAILURE;
}
#endif
#ifdef WDI_EVENT_ENABLE
#ifdef BE_PKTLOG_SUPPORT
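/**
 * dp_set_hybrid_pktlog_enable() - enable hybrid tx pktlog
 * @pdev: Datapath PDEV handle
 * @mon_pdev: Monitor PDEV handle
 * @soc: Datapath soc handle
 *
 * Sets up the hybrid pktlog filters and starts the reap timer; when a
 * monitor vdev is already up, only the mode flag is recorded.
 *
 * Return: false if the caller should stop processing (monitor mode active
 * or filter update failed), true otherwise
 */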
static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
                            struct dp_mon_pdev *mon_pdev,
                            struct dp_soc *soc)
{
    if (mon_pdev->mvdev) {
        /* Nothing needs to be done if monitor mode is
         * enabled
         */
        mon_pdev->pktlog_hybrid_mode = true;
        return false;
    }

    if (!mon_pdev->pktlog_hybrid_mode) {
        mon_pdev->pktlog_hybrid_mode = true;
        dp_mon_filter_setup_pktlog_hybrid(pdev);
        if (dp_mon_filter_update(pdev) !=
            QDF_STATUS_SUCCESS) {
            dp_cdp_err("Set hybrid filters failed");
            dp_mon_filter_reset_pktlog_hybrid(pdev);
            mon_pdev->rx_pktlog_mode =
                DP_RX_PKTLOG_DISABLED;
            return false;
        }

        dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_PKTLOG);
    }

    return true;
}

static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
    mon_pdev->pktlog_hybrid_mode = false;
}
#else
static void
dp_set_hybrid_pktlog_disable(struct dp_mon_pdev *mon_pdev)
{
}

static bool
dp_set_hybrid_pktlog_enable(struct dp_pdev *pdev,
                            struct dp_mon_pdev *mon_pdev,
                            struct dp_soc *soc)
{
    dp_cdp_err("Hybrid mode is supported only on beryllium");
    return true;
}
#endif
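
/**
 * dp_set_pktlog_wifi3() - attach/detach a pktlog WDI event
 * @pdev: Datapath PDEV handle
 * @event: WDI event to configure
 * @enable: true to attach the event, false to detach it
 *
 * Return: 0
 */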

int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
			bool enable)
{
	struct dp_soc *soc = NULL;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	uint8_t mac_id = 0;
	struct dp_mon_soc *mon_soc;
	struct dp_mon_ops *mon_ops;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	soc = pdev->soc;
	mon_soc = soc->monitor_soc;
	mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops)
		return 0;

	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  FL("Max_mac_rings %d "),
		  max_mac_rings);

	if (enable) {
		switch (event) {
		case WDI_EVENT_RX_DESC:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
				return 0;
			}

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
			dp_mon_filter_setup_rx_pkt_log_full(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog full filters set failed",
					   soc);
				dp_mon_filter_reset_rx_pkt_log_full(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;

		case WDI_EVENT_LITE_RX:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
				return 0;
			}

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;

			/*
			 * Set the packet log lite mode filter.
			 */
			dp_mon_filter_setup_rx_pkt_log_lite(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog lite filters set failed",
					   soc);
				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;

		case WDI_EVENT_LITE_T2H:
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev = dp_get_mac_id_for_pdev(
							mac_id, pdev->pdev_id);

				mon_pdev->pktlog_ppdu_stats = true;
				dp_h2t_cfg_stats_msg_send(pdev,
					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
					mac_for_pdev);
			}
			break;

		case WDI_EVENT_RX_CBF:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				dp_mon_info("Mon mode, CBF setting filters");
				mon_pdev->rx_pktlog_cbf = true;
				return 0;
			}

			if (mon_pdev->rx_pktlog_cbf)
				break;

			mon_pdev->rx_pktlog_cbf = true;
			mon_pdev->monitor_configured = true;
			if (mon_ops->mon_vdev_set_monitor_mode_buf_rings)
				mon_ops->mon_vdev_set_monitor_mode_buf_rings(
					pdev);

			/*
			 * Set the packet log CBF mode filter.
			 */
			qdf_info("Non mon mode: Enable destination ring");
			dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_mon_err("Pktlog set CBF filters failed");
				dp_mon_filter_reset_rx_pktlog_cbf(pdev);
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				mon_pdev->monitor_configured = false;
				return 0;
			}

			dp_monitor_reap_timer_start(soc,
						    CDP_MON_REAP_SOURCE_PKTLOG);
			break;

		case WDI_EVENT_HYBRID_TX:
			if (!dp_set_hybrid_pktlog_enable(pdev, mon_pdev, soc))
				return 0;
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	} else {
		switch (event) {
		case WDI_EVENT_RX_DESC:
		case WDI_EVENT_LITE_RX:
			if (mon_pdev->mvdev) {
				/* Nothing needs to be done if monitor mode is
				 * enabled
				 */
				mon_pdev->rx_pktlog_mode =
					DP_RX_PKTLOG_DISABLED;
				return 0;
			}

			if (mon_pdev->rx_pktlog_mode == DP_RX_PKTLOG_DISABLED)
				break;

			mon_pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
			dp_mon_filter_reset_rx_pkt_log_full(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog filters reset failed",
					   soc);
				return 0;
			}

			dp_mon_filter_reset_rx_pkt_log_lite(pdev);
			if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: Pktlog filters reset failed",
					   soc);
				return 0;
			}

			dp_monitor_reap_timer_stop(soc,
						   CDP_MON_REAP_SOURCE_PKTLOG);
			break;

		case WDI_EVENT_LITE_T2H:
			/*
			 * Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in the FW. Once the corresponding macros are defined
			 * in the HTT header file, use them here instead.
			 */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
						dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);

				mon_pdev->pktlog_ppdu_stats = false;
				if (!mon_pdev->enhanced_stats_en &&
				    !mon_pdev->tx_sniffer_enable &&
				    !mon_pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (mon_pdev->tx_sniffer_enable ||
					   mon_pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (mon_pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
			}
			break;

		case WDI_EVENT_RX_CBF:
			mon_pdev->rx_pktlog_cbf = false;
			break;

		case WDI_EVENT_HYBRID_TX:
			dp_set_hybrid_pktlog_disable(mon_pdev);
			break;

		default:
			/* Nothing needs to be done for other pktlog types */
			break;
		}
	}

	return 0;
}
#endif
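
/*
 * Usage sketch (illustrative only, not driver code): enabling and later
 * disabling lite Rx pktlog through dp_set_pktlog_wifi3(). Note the function
 * returns 0 on every path, so failures surface through the error logs and
 * the resulting mon_pdev->rx_pktlog_mode state, not the return value.
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */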

/* MCL specific functions */
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct hif_opaque_softc *scn = soc->hif_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (!scn) {
		dp_mon_err("Invalid hif(scn) handle");
		return;
	}

	dp_monitor_reap_timer_stop(soc, CDP_MON_REAP_SOURCE_PKTLOG);
	pktlogmod_exit(scn);
	mon_pdev->pkt_log_init = false;
}
#endif /*DP_CON_MON*/

#if defined(WDI_EVENT_ENABLE) && defined(QCA_ENHANCED_STATS_SUPPORT)
#ifdef IPA_OFFLOAD
void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
			     struct cdp_interface_peer_stats *peer_stats_intf)
{
	struct dp_rx_tid *rx_tid = NULL;
	uint8_t i = 0;

	for (i = 0; i < DP_MAX_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];
		peer_stats_intf->rx_byte_count +=
			rx_tid->rx_msdu_cnt.bytes;
		peer_stats_intf->rx_packet_count +=
			rx_tid->rx_msdu_cnt.num;
	}
	peer_stats_intf->tx_packet_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.num;
	peer_stats_intf->tx_byte_count =
		peer->monitor_peer->stats.tx.tx_ucast_success.bytes;
}
#else
void dp_peer_get_tx_rx_stats(struct dp_peer *peer,
			     struct cdp_interface_peer_stats *peer_stats_intf)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *tgt_peer = NULL;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	txrx_peer = tgt_peer->txrx_peer;
	peer_stats_intf->rx_packet_count = txrx_peer->to_stack.num;
	peer_stats_intf->rx_byte_count = txrx_peer->to_stack.bytes;
	peer_stats_intf->tx_packet_count =
		txrx_peer->stats.per_pkt_stats.tx.ucast.num;
	peer_stats_intf->tx_byte_count =
		txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
}
#endif

QDF_STATUS dp_peer_stats_notify(struct dp_pdev *dp_pdev, struct dp_peer *peer)
{
	struct cdp_interface_peer_stats peer_stats_intf = {0};
	struct dp_mon_peer_stats *mon_peer_stats = NULL;
	struct dp_peer *tgt_peer = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;

	if (qdf_unlikely(!peer || !peer->vdev || !peer->monitor_peer))
		return QDF_STATUS_E_FAULT;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (qdf_unlikely(!tgt_peer))
		return QDF_STATUS_E_FAULT;

	txrx_peer = tgt_peer->txrx_peer;
	if (qdf_unlikely(!txrx_peer))
		return QDF_STATUS_E_FAULT;

	mon_peer_stats = &peer->monitor_peer->stats;

	if (mon_peer_stats->rx.last_snr != mon_peer_stats->rx.snr)
		peer_stats_intf.rssi_changed = true;

	if ((mon_peer_stats->rx.snr && peer_stats_intf.rssi_changed) ||
	    (mon_peer_stats->tx.tx_rate &&
	     mon_peer_stats->tx.tx_rate != mon_peer_stats->tx.last_tx_rate)) {
		qdf_mem_copy(peer_stats_intf.peer_mac, peer->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		peer_stats_intf.vdev_id = peer->vdev->vdev_id;
		peer_stats_intf.last_peer_tx_rate =
					mon_peer_stats->tx.last_tx_rate;
		peer_stats_intf.peer_tx_rate = mon_peer_stats->tx.tx_rate;
		peer_stats_intf.peer_rssi = mon_peer_stats->rx.snr;
		peer_stats_intf.ack_rssi = mon_peer_stats->tx.last_ack_rssi;
		dp_peer_get_tx_rx_stats(peer, &peer_stats_intf);
		peer_stats_intf.per = tgt_peer->stats.tx.last_per;
		peer_stats_intf.free_buff = INVALID_FREE_BUFF;
		dp_wdi_event_handler(WDI_EVENT_PEER_STATS, dp_pdev->soc,
				     (void *)&peer_stats_intf, 0,
				     WDI_NO_VAL, dp_pdev->pdev_id);
	}

	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_nac_filter() - Function to perform filtering of non-associated
 * clients
 * @pdev: DP pdev handle
 * @rx_pkt_hdr: Rx packet Header
 *
 * Return: dp_vdev*
 */
static
struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
				 uint8_t *rx_pkt_hdr)
{
	struct ieee80211_frame *wh;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
		return NULL;

	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
			dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
				    pdev->soc,
				    peer->neighbour_peers_macaddr.raw[0],
				    peer->neighbour_peers_macaddr.raw[1],
				    peer->neighbour_peers_macaddr.raw[2],
				    peer->neighbour_peers_macaddr.raw[3],
				    peer->neighbour_peers_macaddr.raw[4],
				    peer->neighbour_peers_macaddr.raw[5]);
			qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
			return mon_pdev->mvdev;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

	return NULL;
}

QDF_STATUS dp_filter_neighbour_peer(struct dp_pdev *pdev,
				    uint8_t *rx_pkt_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->filter_neighbour_peers) {
		/* Next-hop scenario not yet handled */
		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
		if (vdev) {
			dp_rx_mon_deliver(pdev->soc, pdev->pdev_id,
					  pdev->invalid_peer_head_msdu,
					  pdev->invalid_peer_tail_msdu);

			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
			return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers (NAC clients)
 * address for smart mesh filtering
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: Add/Del command
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_mon_pdev *mon_pdev;

	if (!vdev || !macaddr)
		goto fail0;

	pdev = vdev->pdev;

	if (!pdev)
		goto fail0;

	mon_pdev = pdev->monitor_pdev;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *)qdf_mem_malloc(
				sizeof(*peer));

		if (!peer) {
			dp_cdp_err("%pK: DP neighbour peer node memory allocation failed",
				   soc);
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			     macaddr, QDF_MAC_ADDR_SIZE);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&mon_pdev->neighbour_peers_list, peer,
				  neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);

		/* first neighbour */
		if (!mon_pdev->neighbour_peers_added) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			mon_pdev->neighbour_peers_added = true;
			dp_mon_filter_setup_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: smart mon filter setup failed",
					   soc);
				dp_mon_filter_reset_smart_monitor(pdev);
				mon_pdev->neighbour_peers_added = false;
			}
		}
	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
					 macaddr, QDF_MAC_ADDR_SIZE)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
					     peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted */
		if (TAILQ_EMPTY(&mon_pdev->neighbour_peers_list)) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			dp_mon_filter_reset_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				dp_cdp_err("%pK: smart mon filter clear failed",
					   soc);
			}
			mon_pdev->neighbour_peers_added = false;
		}
		qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	}
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 1;

fail0:
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 0;
}
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
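
/*
 * Usage sketch (illustrative only, not driver code): adding and removing a
 * NAC client MAC for smart mesh filtering. "nac_mac" is a hypothetical
 * buffer; the function returns 1 on success and 0 on failure.
 *
 *	uint8_t nac_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *
 *	if (!dp_update_filter_neighbour_peers(soc_hdl, vdev_id,
 *					      DP_NAC_PARAM_ADD, nac_mac))
 *		dp_cdp_err("NAC add failed");
 *	...
 *	dp_update_filter_neighbour_peers(soc_hdl, vdev_id,
 *					 DP_NAC_PARAM_DEL, nac_mac);
 */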

#ifdef ATH_SUPPORT_NAC_RSSI
/**
 * dp_vdev_get_neighbour_rssi() - Store RSSI for configured NAC
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @mac_addr: neighbour mac
 * @rssi: rssi value
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure.
 */
static QDF_STATUS dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
					     uint8_t vdev_id,
					     char *mac_addr,
					     uint8_t *rssi)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_mon_pdev *mon_pdev;

	if (!vdev)
		return status;

	pdev = vdev->pdev;
	mon_pdev = pdev->monitor_pdev;

	*rssi = 0;
	qdf_spin_lock_bh(&mon_pdev->neighbour_peer_mutex);
	TAILQ_FOREACH(peer, &mon_pdev->neighbour_peers_list,
		      neighbour_peer_list_elem) {
		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
			*rssi = peer->rssi;
			status = QDF_STATUS_SUCCESS;
			break;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev->neighbour_peer_mutex);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}

static QDF_STATUS
dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
		       uint8_t vdev_id,
		       enum cdp_nac_param_cmd cmd, char *bssid,
		       char *client_macaddr,
		       uint8_t chan_num)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	struct dp_pdev *pdev;
	struct dp_mon_pdev *mon_pdev;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = (struct dp_pdev *)vdev->pdev;
	mon_pdev = pdev->monitor_pdev;
	mon_pdev->nac_rssi_filtering = 1;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == CDP_NAC_PARAM_ADD) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_ADD,
						 (uint8_t *)client_macaddr);
	} else if (cmd == CDP_NAC_PARAM_DEL) {
		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
						 DP_NAC_PARAM_DEL,
						 (uint8_t *)client_macaddr);
	}

	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
			(soc->ctrl_psoc, pdev->pdev_id,
			 vdev->vdev_id, cmd, bssid, client_macaddr);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
#endif
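
/*
 * Usage sketch (illustrative only, not driver code): once a client has been
 * configured via dp_config_for_nac_rssi(), its last reported RSSI can be
 * polled. "client_mac" is a hypothetical buffer holding the configured
 * client's address.
 *
 *	uint8_t rssi = 0;
 *
 *	if (dp_vdev_get_neighbour_rssi(soc_hdl, vdev_id, client_mac, &rssi) ==
 *	    QDF_STATUS_SUCCESS)
 *		dp_mon_info("NAC client RSSI: %u", rssi);
 */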

bool
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl,
			 enum cdp_mon_reap_source source,
			 bool enable)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	if (enable)
		return dp_monitor_reap_timer_start(soc, source);
	else
		return dp_monitor_reap_timer_stop(soc, source);
}

#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!handle) {
		dp_mon_err("pdev handle is NULL");
		return;
	}

	mon_pdev = handle->monitor_pdev;

	if (mon_pdev->pkt_log_init) {
		dp_mon_err("%pK: Packet log already initialized", soc);
		return;
	}

	pktlog_sethandle(&mon_pdev->pl_dev, scn);
	pktlog_set_pdev_id(mon_pdev->pl_dev, pdev_id);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		mon_pdev->pkt_log_init = false;
	} else {
		mon_pdev->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_pkt_log_exit() - Wrapper API to cleanup pktlog info
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Return: none
 */
static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("pdev handle is NULL");
		return;
	}

	dp_pktlogmod_exit(pdev);
}
#else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
}

static void dp_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
}
#endif
#endif
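
/*
 * Lifecycle note with an illustrative sequence (not driver code): on MCL
 * builds pktlog is brought up once per pdev and must be torn down before
 * the pdev detaches, otherwise the pktlog reap timer keeps referencing a
 * stale HIF handle. "hif_ctx" is the HIF context assumed here.
 *
 *	dp_pkt_log_con_service(soc_hdl, pdev_id, hif_ctx);  // init + HTC attach
 *	...
 *	dp_pkt_log_exit(soc_hdl, pdev_id);  // also stops the reap timer
 */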

void dp_neighbour_peers_detach(struct dp_pdev *pdev)
{
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	TAILQ_FOREACH_SAFE(peer, &mon_pdev->neighbour_peers_list,
			   neighbour_peer_list_elem, temp_peer) {
		/* delete this peer from the list */
		TAILQ_REMOVE(&mon_pdev->neighbour_peers_list,
			     peer, neighbour_peer_list_elem);
		qdf_mem_free(peer);
	}

	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
}

#ifdef QCA_ENHANCED_STATS_SUPPORT
/*
 * dp_mon_tx_enable_enhanced_stats() - Enable enhanced Tx stats
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */
static void dp_mon_tx_enable_enhanced_stats(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_tx_enable_enhanced_stats)
		mon_ops->mon_tx_enable_enhanced_stats(pdev);
}

/*
 * dp_enable_enhanced_stats() - API to enable enhanced statistics
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_mon_pdev *mon_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (!mon_pdev)
		return QDF_STATUS_E_FAILURE;

	if (mon_pdev->enhanced_stats_en == 0)
		dp_cal_client_timer_start(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 1;
	pdev->enhanced_stats_en = true;

	dp_mon_filter_setup_enhanced_stats(pdev);
	status = dp_mon_filter_update(pdev);
	if (status != QDF_STATUS_SUCCESS) {
		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
		dp_mon_filter_reset_enhanced_stats(pdev);
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);
		mon_pdev->enhanced_stats_en = 0;
		pdev->enhanced_stats_en = false;
		return QDF_STATUS_E_FAILURE;
	}

	dp_mon_tx_enable_enhanced_stats(pdev);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_mon_tx_disable_enhanced_stats() - Disable enhanced Tx stats
 * @pdev: Datapath pdev handle
 *
 * Return: void
 */
static void dp_mon_tx_disable_enhanced_stats(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_tx_disable_enhanced_stats)
		mon_ops->mon_tx_disable_enhanced_stats(pdev);
}

/*
 * dp_disable_enhanced_stats() - API to disable enhanced statistics
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev || !pdev->monitor_pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->enhanced_stats_en == 1)
		dp_cal_client_timer_stop(mon_pdev->cal_client_ctx);

	mon_pdev->enhanced_stats_en = 0;
	pdev->enhanced_stats_en = false;

	dp_mon_tx_disable_enhanced_stats(pdev);

	dp_mon_filter_reset_enhanced_stats(pdev);
	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to reset enhanced mode filters"));
	}

	return QDF_STATUS_SUCCESS;
}
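
/*
 * Usage sketch (illustrative only, not driver code): the enable/disable
 * pair above is symmetric; enable programs the monitor filters and starts
 * the cal client timer, disable reverses both.
 *
 *	if (dp_enable_enhanced_stats(soc_hdl, pdev_id) != QDF_STATUS_SUCCESS)
 *		return;  // filters could not be programmed
 *	...
 *	dp_disable_enhanced_stats(soc_hdl, pdev_id);
 */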

#ifdef WDI_EVENT_ENABLE
QDF_STATUS dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
				    struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	struct cdp_interface_peer_qos_stats qos_stats_intf = {0};

	if (qdf_unlikely(ppdu_user->peer_id == HTT_INVALID_PEER)) {
		dp_mon_warn("Invalid peer id");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_copy(qos_stats_intf.peer_mac, ppdu_user->mac_addr,
		     QDF_MAC_ADDR_SIZE);
	qos_stats_intf.frame_control = ppdu_user->frame_control;
	qos_stats_intf.frame_control_info_valid =
			ppdu_user->frame_control_info_valid;
	qos_stats_intf.qos_control = ppdu_user->qos_control;
	qos_stats_intf.qos_control_info_valid =
			ppdu_user->qos_control_info_valid;
	qos_stats_intf.vdev_id = ppdu_user->vdev_id;
	dp_wdi_event_handler(WDI_EVENT_PEER_QOS_STATS, dp_pdev->soc,
			     (void *)&qos_stats_intf, 0,
			     WDI_NO_VAL, dp_pdev->pdev_id);

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_peer_qos_stats_notify(struct dp_pdev *dp_pdev,
			 struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#endif /* QCA_ENHANCED_STATS_SUPPORT */

/**
 * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
 * for pktlog
 * @soc: cdp_soc handle
 * @pdev_id: id of dp pdev handle
 * @mac_addr: Peer mac address
 * @enb_dsb: Enable or disable peer based filtering
 *
 * Return: QDF_STATUS
 */
static int
dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
			    uint8_t *mac_addr, uint8_t enb_dsb)
{
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_mon_pdev *mon_pdev;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	mon_pdev = pdev->monitor_pdev;
	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);

	if (!peer) {
		dp_mon_err("Invalid Peer");
		return QDF_STATUS_E_FAILURE;
	}

	if (!IS_MLO_DP_MLD_PEER(peer) && peer->monitor_peer) {
		peer->monitor_peer->peer_based_pktlog_filter = enb_dsb;
		mon_pdev->dp_peer_based_pktlog = enb_dsb;
		status = QDF_STATUS_SUCCESS;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}

/**
 * dp_peer_update_pkt_capture_params() - Set Rx & Tx Capture flags for a peer
 * @soc: DP_SOC handle
 * @pdev_id: id of DP_PDEV handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 * Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is configured for the peer,
 * error status otherwise
 */
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
static QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac)
{
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
				      peer_mac, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
	if (!peer)
		return QDF_STATUS_E_FAILURE;

	/* we need to set tx pkt capture for non associated peer */
	if (!IS_MLO_DP_MLD_PEER(peer)) {
		status = dp_monitor_tx_peer_filter(pdev, peer,
						   is_tx_pkt_cap_enable,
						   peer_mac);

		status = dp_peer_set_rx_capture_enabled(pdev, peer,
							is_rx_pkt_cap_enable,
							peer_mac);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif
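
/*
 * Usage sketch (illustrative only, not driver code): enabling Rx capture
 * together with a Tx capture action for one peer. "peer_mac" is a
 * hypothetical MAC buffer; the tx action value follows the
 * enable/disable/delete/print encoding described above.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_peer_update_pkt_capture_params(soc_hdl, pdev_id,
 *						   true, 1, peer_mac);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_mon_err("peer pkt capture config failed");
 */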

#ifdef QCA_MCOPY_SUPPORT
QDF_STATUS dp_mcopy_check_deliver(struct dp_pdev *pdev,
				  uint16_t peer_id,
				  uint32_t ppdu_id,
				  uint8_t first_msdu)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (mon_pdev->mcopy_mode) {
		if (mon_pdev->mcopy_mode == M_COPY) {
			if ((mon_pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
			    (mon_pdev->m_copy_id.tx_peer_id == peer_id)) {
				return QDF_STATUS_E_INVAL;
			}
		}

		if (!first_msdu)
			return QDF_STATUS_E_INVAL;

		mon_pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		mon_pdev->m_copy_id.tx_peer_id = peer_id;
	}

	return QDF_STATUS_SUCCESS;
}
#endif
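
/*
 * Worked example (illustrative): in M_COPY mode the (tx_ppdu_id,
 * tx_peer_id) pair acts as a one-entry dedup cache, so within one PPDU only
 * the first MSDU of a peer is delivered:
 *
 *	dp_mcopy_check_deliver(pdev, 7, 100, 1); // SUCCESS, cache = {100, 7}
 *	dp_mcopy_check_deliver(pdev, 7, 100, 0); // E_INVAL, same ppdu/peer
 *	dp_mcopy_check_deliver(pdev, 7, 101, 1); // SUCCESS, new ppdu id
 */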

#ifdef WDI_EVENT_ENABLE
#ifndef REMOVE_PKT_LOG
static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev || !pdev->monitor_pdev)
		return NULL;

	return pdev->monitor_pdev->pl_dev;
}
#else
static void *dp_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	return NULL;
}
#endif
#endif

QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
				  uint32_t mac_id,
				  uint32_t event,
				  qdf_nbuf_t mpdu,
				  uint32_t msdu_timestamp)
{
	uint32_t data_size, hdr_size, ppdu_id, align4byte;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t *msg_word;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	ppdu_id = pdev->monitor_pdev->ppdu_info.com_info.ppdu_id;

	hdr_size = HTT_T2H_PPDU_STATS_IND_HDR_SIZE
		+ qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload);

	data_size = qdf_nbuf_len(mpdu);

	qdf_nbuf_push_head(mpdu, hdr_size);

	msg_word = (uint32_t *)qdf_nbuf_data(mpdu);
	/*
	 * Populate the PPDU Stats Indication header
	 */
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_T2H_MSG_TYPE_PPDU_STATS_IND);
	HTT_T2H_PPDU_STATS_MAC_ID_SET(*msg_word, mac_id);
	HTT_T2H_PPDU_STATS_PDEV_ID_SET(*msg_word, pdev->pdev_id);
	align4byte = ((data_size +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_SET(*msg_word, align4byte);
	msg_word++;
	HTT_T2H_PPDU_STATS_PPDU_ID_SET(*msg_word, ppdu_id);
	msg_word++;

	*msg_word = msdu_timestamp;
	msg_word++;
	/* Skip reserved field */
	msg_word++;
	/*
	 * Populate MGMT_CTRL Payload TLV first
	 */
	HTT_STATS_TLV_TAG_SET(*msg_word,
			      HTT_PPDU_STATS_RX_MGMTCTRL_PAYLOAD_TLV);

	align4byte = ((data_size - sizeof(htt_tlv_hdr_t) +
		qdf_offsetof(htt_ppdu_stats_rx_mgmtctrl_payload_tlv, payload)
		+ 3) >> 2) << 2;
	HTT_STATS_TLV_LENGTH_SET(*msg_word, align4byte);

	msg_word++;
	HTT_PPDU_STATS_RX_MGMTCTRL_TLV_FRAME_LENGTH_SET(
		*msg_word, data_size);
	msg_word++;

	dp_wdi_event_handler(event, soc, (void *)mpdu,
			     HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);

	qdf_nbuf_pull_head(mpdu, hdr_size);

	return QDF_STATUS_SUCCESS;
}
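
/*
 * Note on the 4-byte alignment math above (illustrative numbers): the
 * expression ((len + 3) >> 2) << 2 rounds len up to the next multiple of 4,
 * as the HTT length fields expect. For instance, if data_size is 26 and the
 * mgmtctrl payload offset were, say, 12, then len = 38 and
 * ((38 + 3) >> 2) << 2 = 40.
 */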

#ifdef ATH_SUPPORT_EXT_STAT
#ifdef WLAN_TELEMETRY_STATS_SUPPORT
/*
 * dp_peer_update_telemetry_stats() - update peer telemetry stats
 * @peer: Datapath peer
 */
static inline
void dp_peer_update_telemetry_stats(struct dp_peer *peer)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_mon_peer *mon_peer = NULL;
	uint8_t idx;

	vdev = peer->vdev;
	if (!vdev)
		return;

	pdev = vdev->pdev;
	if (!pdev)
		return;

	mon_peer = peer->monitor_peer;
	if (qdf_likely(mon_peer)) {
		DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_failed,
			     mon_peer->stats.tx.retries);
		DP_STATS_INC(pdev, telemetry_stats.tx_mpdu_total,
			     mon_peer->stats.tx.tx_mpdus_tried);
		idx = mon_peer->stats.airtime_consumption.avg_consumption.idx;
		mon_peer->stats.airtime_consumption.avg_consumption.avg_consumption_per_sec[idx] =
				mon_peer->stats.airtime_consumption.consumption;
		mon_peer->stats.airtime_consumption.consumption = 0;
		mon_peer->stats.airtime_consumption.avg_consumption.idx++;
		if (mon_peer->stats.airtime_consumption.avg_consumption.idx ==
		    MAX_CONSUMPTION_TIME)
			mon_peer->stats.airtime_consumption.avg_consumption.idx = 0;
	}
}
#else
static inline
void dp_peer_update_telemetry_stats(struct dp_peer *peer)
{ }
#endif

/*
 * dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 * @arg: argument to iter function
 */
#ifdef IPA_OFFLOAD
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
				 struct dp_peer *peer,
				 void *arg)
{
	struct cdp_calibr_stats_intf peer_stats_intf = {0};
	struct dp_peer *tgt_peer = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;

	dp_peer_update_telemetry_stats(peer);

	if (!dp_peer_is_primary_link_peer(peer))
		return;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (!tgt_peer || !(tgt_peer->txrx_peer))
		return;

	txrx_peer = tgt_peer->txrx_peer;
	peer_stats_intf.to_stack = txrx_peer->to_stack;
	peer_stats_intf.tx_success =
				peer->monitor_peer->stats.tx.tx_ucast_success;
	peer_stats_intf.tx_ucast =
				peer->monitor_peer->stats.tx.tx_ucast_total;

	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
					      &tgt_peer->stats);
	dp_peer_get_rxtid_stats_ipa(peer, dp_peer_update_tid_stats_from_reo);
}
#else
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
				 struct dp_peer *peer,
				 void *arg)
{
	struct cdp_calibr_stats_intf peer_stats_intf = {0};
	struct dp_peer *tgt_peer = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;

	dp_peer_update_telemetry_stats(peer);

	if (!dp_peer_is_primary_link_peer(peer))
		return;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	if (!tgt_peer || !(tgt_peer->txrx_peer))
		return;

	txrx_peer = tgt_peer->txrx_peer;
	peer_stats_intf.to_stack = txrx_peer->to_stack;
	peer_stats_intf.tx_success =
				txrx_peer->stats.per_pkt_stats.tx.tx_success;
	peer_stats_intf.tx_ucast =
				txrx_peer->stats.per_pkt_stats.tx.ucast;

	dp_cal_client_update_peer_stats_wifi3(&peer_stats_intf,
					      &tgt_peer->stats);
}
#endif

/*
 * dp_iterate_update_peer_list() - update peer stats on cal client timer
 * @pdev_hdl: pdev handle
 */
static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;

	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
			     DP_MOD_ID_CDP);
}
#else
static void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
}
#endif

#ifdef ATH_SUPPORT_NAC
int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
			      bool val)
{
	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->monitor_pdev->filter_neighbour_peers = val;
	return 0;
}
#endif /* ATH_SUPPORT_NAC */

#ifdef WLAN_ATF_ENABLE
void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
	if (!pdev) {
		dp_cdp_err("Invalid pdev");
		return;
	}

	pdev->monitor_pdev->dp_atf_stats_enable = value;
}
#endif

#ifdef QCA_ENHANCED_STATS_SUPPORT
/*
 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv() - Process
 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @ppdu_id: PPDU id
 *
 * Return: QDF_STATUS_SUCCESS if nbuf has to be freed in caller
 */
QDF_STATUS
dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
					      qdf_nbuf_t tag_buf,
					      uint32_t ppdu_id)
{
	uint32_t *nbuf_ptr;
	uint8_t trim_size;
	size_t head_size;
	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
	uint32_t *msg_word;
	uint32_t tsf_hdr;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((!mon_pdev->tx_sniffer_enable) && (!mon_pdev->mcopy_mode) &&
	    (!mon_pdev->bpr_enable) && (!mon_pdev->tx_capture_enabled))
		return QDF_STATUS_SUCCESS;

	/*
	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
	 */
	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
	msg_word = msg_word + 2;
	tsf_hdr = *msg_word;

	trim_size = ((mon_pdev->mgmtctrl_frm_info.mgmt_buf +
		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
		      qdf_nbuf_data(tag_buf));

	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
		return QDF_STATUS_SUCCESS;

	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
			   mon_pdev->mgmtctrl_frm_info.mgmt_buf_len);

	if (mon_pdev->tx_capture_enabled) {
		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
			qdf_err("Fail to get headroom h_sz %zu h_avail %d\n",
				head_size, qdf_nbuf_headroom(tag_buf));
			qdf_assert_always(0);
			return QDF_STATUS_E_NOMEM;
		}

		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
					qdf_nbuf_push_head(tag_buf, head_size);
		qdf_assert_always(ptr_mgmt_comp_info);
		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
		ptr_mgmt_comp_info->is_sgen_pkt = true;
		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
	} else {
		head_size = sizeof(ppdu_id);
		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
		*nbuf_ptr = ppdu_id;
	}

	if (mon_pdev->bpr_enable) {
		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
				     tag_buf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}

	dp_deliver_mgmt_frm(pdev, tag_buf);

	return QDF_STATUS_E_ALREADY;
}

/*
 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
 * bitmap for sniffer mode
 * @bitmap: received bitmap
 *
 * Return: expected bitmap value, returns zero if doesn't match with
 * either 64-bit Tx window or 256-bit window tlv bitmap
 */
int
dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
{
	if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
	else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
		return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;

	return 0;
}

/*
 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
 * @peer: Datapath peer handle
 * @ppdu: User PPDU Descriptor
 * @cur_ppdu_id: PPDU_ID
 *
 * Return: None
 *
 * On a Tx data frame, delayed BA may be set in
 * htt_ppdu_stats_user_common_tlv, which means the Block Ack (BA) arrives
 * only after we send a Block Ack Request (BAR). A successful msdu count is
 * received only after the Block Ack. To populate peer stats we need the
 * successful msdu (data frame) count, so we hold the Tx data stats in
 * delayed_ba until the stats update.
 */
static void
dp_peer_copy_delay_stats(struct dp_peer *peer,
			 struct cdp_tx_completion_ppdu_user *ppdu,
			 uint32_t cur_ppdu_id)
{
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	if (mon_peer->last_delayed_ba) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "BA not yet recv for prev delayed ppdu[%d] - cur ppdu[%d]",
			  mon_peer->last_delayed_ba_ppduid, cur_ppdu_id);
		vdev = peer->vdev;
		if (vdev) {
			pdev = vdev->pdev;
			pdev->stats.cdp_delayed_ba_not_recev++;
		}
	}

	mon_peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
	mon_peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
	mon_peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
	mon_peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
	mon_peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
	mon_peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
	mon_peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
	mon_peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
	mon_peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast =
					ppdu->mpdu_tried_ucast;
	mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast =
					ppdu->mpdu_tried_mcast;
	mon_peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
	mon_peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
	mon_peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
	mon_peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
	mon_peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
	mon_peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
	mon_peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;

	mon_peer->last_delayed_ba = true;
	ppdu->debug_copied = true;
}

/*
 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 *
 * For Tx BAR, the PPDU stats TLV includes Block Ack info, and the PPDU info
 * from the Tx BAR frame is not required to populate peer stats. But we need
 * the successful MPDU and MSDU counts to update the previously transmitted
 * Tx data frame, so overwrite the ppdu stats with the previously stored
 * ppdu stats.
 */
static void
dp_peer_copy_stats_to_bar(struct dp_peer *peer,
			  struct cdp_tx_completion_ppdu_user *ppdu)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	ppdu->ltf_size = mon_peer->delayed_ba_ppdu_stats.ltf_size;
	ppdu->stbc = mon_peer->delayed_ba_ppdu_stats.stbc;
	ppdu->he_re = mon_peer->delayed_ba_ppdu_stats.he_re;
	ppdu->txbf = mon_peer->delayed_ba_ppdu_stats.txbf;
	ppdu->bw = mon_peer->delayed_ba_ppdu_stats.bw;
	ppdu->nss = mon_peer->delayed_ba_ppdu_stats.nss;
	ppdu->gi = mon_peer->delayed_ba_ppdu_stats.gi;
	ppdu->dcm = mon_peer->delayed_ba_ppdu_stats.dcm;
	ppdu->ldpc = mon_peer->delayed_ba_ppdu_stats.ldpc;
	ppdu->mpdu_tried_ucast =
			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
	ppdu->mpdu_tried_mcast =
			mon_peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
	ppdu->frame_ctrl = mon_peer->delayed_ba_ppdu_stats.frame_ctrl;
	ppdu->qos_ctrl = mon_peer->delayed_ba_ppdu_stats.qos_ctrl;
	ppdu->ru_start = mon_peer->delayed_ba_ppdu_stats.ru_start;
	ppdu->ru_tones = mon_peer->delayed_ba_ppdu_stats.ru_tones;
	ppdu->is_mcast = mon_peer->delayed_ba_ppdu_stats.is_mcast;
	ppdu->user_pos = mon_peer->delayed_ba_ppdu_stats.user_pos;
	ppdu->mu_group_id = mon_peer->delayed_ba_ppdu_stats.mu_group_id;

	mon_peer->last_delayed_ba = false;
	ppdu->debug_copied = true;
}

/*
 * dp_tx_rate_stats_update() - Update rate per-peer statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
static void
dp_tx_rate_stats_update(struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *ppdu)
{
	uint32_t ratekbps = 0;
	uint64_t ppdu_tx_rate = 0;
	uint32_t rix;
	uint16_t ratecode = 0;
	struct dp_mon_peer *mon_peer = NULL;

	if (!peer || !ppdu)
		return;

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK)
		return;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return;

	ratekbps = dp_getrateindex(ppdu->gi,
				   ppdu->mcs,
				   ppdu->nss,
				   ppdu->preamble,
				   ppdu->bw,
				   ppdu->punc_mode,
				   &rix,
				   &ratecode);

	if (!ratekbps)
		return;

	/* Calculate goodput in non-training period
	 * In training period, don't do anything as
	 * pending pkt is send as goodput.
	 */
	if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
		ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
				    (CDP_PERCENT_MACRO - ppdu->current_rate_per));
	}
	ppdu->rix = rix;
	ppdu->tx_ratekbps = ratekbps;
	ppdu->tx_ratecode = ratecode;
	DP_STATS_UPD(mon_peer, tx.tx_rate, ratekbps);
	mon_peer->stats.tx.avg_tx_rate =
		dp_ath_rate_lpf(mon_peer->stats.tx.avg_tx_rate, ratekbps);
	ppdu_tx_rate = dp_ath_rate_out(mon_peer->stats.tx.avg_tx_rate);
	DP_STATS_UPD(mon_peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);

	mon_peer->stats.tx.bw_info = ppdu->bw;
	mon_peer->stats.tx.gi_info = ppdu->gi;
	mon_peer->stats.tx.nss_info = ppdu->nss;
	mon_peer->stats.tx.mcs_info = ppdu->mcs;
	mon_peer->stats.tx.preamble_info = ppdu->preamble;
	if (peer->vdev) {
		/*
		 * In STA mode:
		 *	We get ucast stats as BSS peer stats.
		 *
		 * In AP mode:
		 *	We get mcast stats as BSS peer stats.
		 *	We get ucast stats as assoc peer stats.
		 */
		if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
			peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
			peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
		} else {
			peer->vdev->stats.tx.last_tx_rate = ratekbps;
			peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
		}
	}
}
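
/*
 * Worked example for the goodput computation above (illustrative numbers,
 * assuming CDP_NUM_KB_IN_MB = 1000 and CDP_PERCENT_MACRO = 100): with
 * ratekbps = 144000 (144 Mbps) and current_rate_per = 10 (10% PER),
 *
 *	sa_goodput = (144000 / 1000) * (100 - 10) = 144 * 90 = 12960,
 *
 * i.e. 129.60 Mbps expressed in units of 0.01 Mbps.
 */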

#if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
void dp_send_stats_event(struct dp_pdev *pdev, struct dp_peer *peer,
			 uint16_t peer_id)
{
	struct cdp_interface_peer_stats peer_stats_intf = {0};
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_txrx_peer *txrx_peer = NULL;

	if (qdf_unlikely(!mon_peer))
		return;

	mon_peer->stats.rx.rx_snr_measured_time = qdf_system_ticks();
	peer_stats_intf.rx_avg_snr = mon_peer->stats.rx.avg_snr;

	txrx_peer = dp_get_txrx_peer(peer);
	if (qdf_likely(txrx_peer)) {
		peer_stats_intf.rx_byte_count = txrx_peer->to_stack.bytes;
		peer_stats_intf.tx_byte_count =
			txrx_peer->stats.per_pkt_stats.tx.tx_success.bytes;
	}

	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer_stats_intf, peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
}
#endif

#ifdef WLAN_FEATURE_11BE
/*
 * dp_get_ru_index_frm_ru_tones() - get ru index
 * @ru_tones: ru tones
 *
 * Return: ru index
 */
static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
{
	enum cdp_ru_index ru_index;

	switch (ru_tones) {
	case RU_26:
		ru_index = RU_26_INDEX;
		break;
	case RU_52:
		ru_index = RU_52_INDEX;
		break;
	case RU_52_26:
		ru_index = RU_52_26_INDEX;
		break;
	case RU_106:
		ru_index = RU_106_INDEX;
		break;
	case RU_106_26:
		ru_index = RU_106_26_INDEX;
		break;
	case RU_242:
		ru_index = RU_242_INDEX;
		break;
	case RU_484:
		ru_index = RU_484_INDEX;
		break;
	case RU_484_242:
		ru_index = RU_484_242_INDEX;
		break;
	case RU_996:
		ru_index = RU_996_INDEX;
		break;
	case RU_996_484:
		ru_index = RU_996_484_INDEX;
		break;
	case RU_996_484_242:
		ru_index = RU_996_484_242_INDEX;
		break;
	case RU_2X996:
		ru_index = RU_2X996_INDEX;
		break;
	case RU_2X996_484:
		ru_index = RU_2X996_484_INDEX;
		break;
	case RU_3X996:
		ru_index = RU_3X996_INDEX;
		break;
	case RU_3X996_484:
		ru_index = RU_3X996_484_INDEX;
		break;
	case RU_4X996:
		ru_index = RU_4X996_INDEX;
		break;
	default:
		ru_index = RU_INDEX_MAX;
		break;
	}

	return ru_index;
}

/*
 * dp_mon_get_ru_width_from_ru_size() - get ru_width from ru_size enum
 * @ru_size: HTT ru_size enum
 *
 * Return: ru_width of uint32_t type
 */
static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
{
	uint32_t width = 0;

	switch (ru_size) {
	case HTT_PPDU_STATS_RU_26:
		width = RU_26;
		break;
	case HTT_PPDU_STATS_RU_52:
		width = RU_52;
		break;
	case HTT_PPDU_STATS_RU_52_26:
		width = RU_52_26;
		break;
	case HTT_PPDU_STATS_RU_106:
		width = RU_106;
		break;
	case HTT_PPDU_STATS_RU_106_26:
		width = RU_106_26;
		break;
	case HTT_PPDU_STATS_RU_242:
		width = RU_242;
		break;
	case HTT_PPDU_STATS_RU_484:
		width = RU_484;
		break;
	case HTT_PPDU_STATS_RU_484_242:
		width = RU_484_242;
		break;
	case HTT_PPDU_STATS_RU_996:
		width = RU_996;
		break;
	case HTT_PPDU_STATS_RU_996_484:
		width = RU_996_484;
		break;
	case HTT_PPDU_STATS_RU_996_484_242:
		width = RU_996_484_242;
		break;
	case HTT_PPDU_STATS_RU_996x2:
		width = RU_2X996;
		break;
	case HTT_PPDU_STATS_RU_996x2_484:
		width = RU_2X996_484;
		break;
	case HTT_PPDU_STATS_RU_996x3:
		width = RU_3X996;
		break;
	case HTT_PPDU_STATS_RU_996x3_484:
		width = RU_3X996_484;
		break;
	case HTT_PPDU_STATS_RU_996x4:
		width = RU_4X996;
		break;
	default:
		dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
	}

	return width;
}

#else
static inline enum cdp_ru_index dp_get_ru_index_frm_ru_tones(uint16_t ru_tones)
{
	enum cdp_ru_index ru_index;

	switch (ru_tones) {
	case RU_26:
		ru_index = RU_26_INDEX;
		break;
	case RU_52:
		ru_index = RU_52_INDEX;
		break;
	case RU_106:
		ru_index = RU_106_INDEX;
		break;
	case RU_242:
		ru_index = RU_242_INDEX;
		break;
	case RU_484:
		ru_index = RU_484_INDEX;
		break;
	case RU_996:
		ru_index = RU_996_INDEX;
		break;
	default:
		ru_index = RU_INDEX_MAX;
		break;
	}

	return ru_index;
}

static uint32_t dp_mon_get_ru_width_from_ru_size(uint16_t ru_size)
{
	uint32_t width = 0;

	switch (ru_size) {
	case HTT_PPDU_STATS_RU_26:
		width = RU_26;
		break;
	case HTT_PPDU_STATS_RU_52:
		width = RU_52;
		break;
	case HTT_PPDU_STATS_RU_106:
		width = RU_106;
		break;
	case HTT_PPDU_STATS_RU_242:
		width = RU_242;
		break;
	case HTT_PPDU_STATS_RU_484:
		width = RU_484;
		break;
	case HTT_PPDU_STATS_RU_996:
		width = RU_996;
		break;
	default:
		dp_mon_debug("Unsupported ru_size: %d rcvd", ru_size);
	}

	return width;
}
#endif
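
/*
 * Usage sketch (illustrative only, not driver code): mapping an HTT ru_size
 * report to a tone count and then to the index used by the tx.ru_loc[]
 * stats array; the values below hold on both 11BE and non-11BE builds.
 *
 *	uint32_t tones = dp_mon_get_ru_width_from_ru_size(HTT_PPDU_STATS_RU_106);
 *	enum cdp_ru_index idx = dp_get_ru_index_frm_ru_tones(tones);
 *
 *	// tones == RU_106, idx == RU_106_INDEX
 */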
  2369. /*
  2370. * dp_tx_stats_update() - Update per-peer statistics
  2371. * @pdev: Datapath pdev handle
  2372. * @peer: Datapath peer handle
  2373. * @ppdu: PPDU Descriptor
  2374. * @ack_rssi: RSSI of last ack received
  2375. *
  2376. * Return: None
  2377. */
  2378. static void
  2379. dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
  2380. struct cdp_tx_completion_ppdu_user *ppdu,
  2381. uint32_t ack_rssi)
  2382. {
  2383. uint8_t preamble, mcs;
  2384. uint16_t num_msdu;
  2385. uint16_t num_mpdu;
  2386. uint16_t mpdu_tried;
	uint16_t mpdu_failed;
	struct dp_mon_ops *mon_ops;
	enum cdp_ru_index ru_index;
	struct dp_mon_peer *mon_peer = NULL;
	uint32_t ratekbps = 0;
	uint64_t tx_byte_count;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;
	num_mpdu = ppdu->mpdu_success;
	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
	mpdu_failed = mpdu_tried - num_mpdu;
	tx_byte_count = ppdu->success_bytes;

	/* If the peer statistics are already processed as part of the
	 * per-MSDU completion handler, do not process them again in the
	 * per-PPDU indications
	 */
	if (pdev->soc->process_tx_status)
		return;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return;

	if (!ppdu->is_mcast) {
		DP_STATS_INC(mon_peer, tx.tx_ucast_total.num, num_msdu);
		DP_STATS_INC(mon_peer, tx.tx_ucast_total.bytes,
			     tx_byte_count);
	}

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
		/*
		 * All failed MPDUs will be retried, so increment the retry
		 * counter by the number of failed MPDUs. Even for an ack
		 * failure, i.e. for long retries, mpdu_failed equals
		 * mpdu_tried.
		 */
		DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);
		return;
	}

	if (ppdu->is_ppdu_cookie_valid)
		DP_STATS_INC(mon_peer, tx.num_ppdu_cookie_valid, 1);

	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
		if (qdf_unlikely(ppdu->mu_group_id &&
				 !(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bound!!\n");
		else
			DP_STATS_UPD(mon_peer,
				     tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
		DP_STATS_UPD(mon_peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(mon_peer, tx.ru_start, ppdu->ru_start);
		ru_index = dp_get_ru_index_frm_ru_tones(ppdu->ru_tones);
		if (ru_index != RU_INDEX_MAX) {
			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_msdu,
				     num_msdu);
			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(mon_peer, tx.ru_loc[ru_index].mpdu_tried,
				     mpdu_tried);
		}
	}

	/*
	 * All failed MPDUs will be retried, so increment the retry counter
	 * by the number of failed MPDUs. Even for an ack failure, i.e. for
	 * long retries, mpdu_failed equals mpdu_tried.
	 */
	DP_STATS_INC(mon_peer, tx.retries, mpdu_failed);

	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
		     num_msdu);
	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
		     num_mpdu);
	DP_STATS_INC(mon_peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
		     mpdu_tried);
	DP_STATS_INC(mon_peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(mon_peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(mon_peer, tx.nss[ppdu->nss], num_msdu);
	if (ppdu->tid < CDP_DATA_TID_MAX)
		DP_STATS_INC(mon_peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
			     num_msdu);
	DP_STATS_INCC(mon_peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(mon_peer, tx.ldpc, num_msdu, ppdu->ldpc);
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(mon_peer, tx.last_ack_rssi, ack_rssi);

	if (!ppdu->is_mcast) {
		DP_STATS_INC(mon_peer, tx.tx_ucast_success.num, num_msdu);
		DP_STATS_INC(mon_peer, tx.tx_ucast_success.bytes,
			     tx_byte_count);
	}
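	/*
	 * Per-preamble MCS histogram: an MCS at or above the maximum defined
	 * for a preamble (e.g. MAX_MCS_11AC for DOT11_AC) is folded into the
	 * last bucket (MAX_MCS - 1); in-range values land in their own bucket.
	 */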
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AX) && (preamble == DOT11_AX)));
	DP_STATS_INCC(mon_peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AX) && (preamble == DOT11_AX)));
	DP_STATS_INCC(mon_peer, tx.ampdu_cnt, num_mpdu, ppdu->is_ampdu);
	DP_STATS_INCC(mon_peer, tx.non_ampdu_cnt, num_mpdu, !(ppdu->is_ampdu));
	DP_STATS_INCC(mon_peer, tx.pream_punct_cnt, 1, ppdu->pream_punct);
	DP_STATS_INC(mon_peer, tx.tx_ppdus, 1);
	DP_STATS_INC(mon_peer, tx.tx_mpdus_success, num_mpdu);
	DP_STATS_INC(mon_peer, tx.tx_mpdus_tried, mpdu_tried);

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (mon_ops && mon_ops->mon_tx_stats_update)
		mon_ops->mon_tx_stats_update(mon_peer, ppdu);

	dp_tx_rate_stats_update(peer, ppdu);
	dp_peer_stats_notify(pdev, peer);

	ratekbps = mon_peer->stats.tx.tx_rate;
	DP_STATS_UPD(mon_peer, tx.last_tx_rate, ratekbps);

	dp_send_stats_event(pdev, peer, ppdu->peer_id);
}
/**
 * dp_get_ppdu_info_user_index() - Find the per-user descriptor slot for a
 * peer id within a PPDU, allocating a new slot when a new peer id arrives
 * @pdev: DP pdev handle
 * @peer_id: peer unique identifier
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: user index to be populated
 */
static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
					   uint16_t peer_id,
					   struct ppdu_info *ppdu_info)
{
	uint8_t user_index = 0;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	while ((user_index + 1) <= ppdu_info->last_user) {
		ppdu_user_desc = &ppdu_desc->user[user_index];
		if (ppdu_user_desc->peer_id != peer_id) {
			user_index++;
			continue;
		} else {
			/* Max users possible is 8, so the user array index
			 * should not exceed 7
			 */
			qdf_assert_always(user_index <=
					  (ppdu_desc->max_users - 1));
			return user_index;
		}
	}
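	/* peer_id not found among the users recorded so far in this PPDU:
	 * allocate the next user slot for it
	 */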
	ppdu_info->last_user++;
	/* Max users possible is 8, so last_user should not exceed 8 */
	qdf_assert_always(ppdu_info->last_user <= ppdu_desc->max_users);
	return ppdu_info->last_user - 1;
}
/**
 * dp_process_ppdu_stats_common_tlv() - Process htt_ppdu_stats_common_tlv
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
				 uint32_t *tag_buf,
				 struct ppdu_info *ppdu_info)
{
	uint16_t frame_type;
	uint16_t frame_ctrl;
	uint16_t freq;
	struct dp_soc *soc = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	uint64_t ppdu_start_timestamp;
	uint32_t *start_tag_buf;

	start_tag_buf = tag_buf;
	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
	ppdu_info->sched_cmdid =
		HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
	ppdu_desc->num_users =
		HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);

	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
	frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
	ppdu_desc->htt_frame_type = frame_type;

	frame_ctrl = ppdu_desc->frame_ctrl;

	ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;

	switch (frame_type) {
	case HTT_STATS_FTYPE_TIDQ_DATA_SU:
	case HTT_STATS_FTYPE_TIDQ_DATA_MU:
	case HTT_STATS_FTYPE_SGEN_QOS_NULL:
		/*
		 * For management packets the frame type comes as DATA_SU,
		 * so check frame_ctrl before setting frame_type
		 */
		if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
			ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
		else
			ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
		break;
	case HTT_STATS_FTYPE_SGEN_MU_BAR:
	case HTT_STATS_FTYPE_SGEN_BAR:
	case HTT_STATS_FTYPE_SGEN_BE_MU_BAR:
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
		break;
	default:
		ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
		break;
	}

	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
	ppdu_desc->tx_duration = *tag_buf;

	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
	ppdu_desc->ppdu_start_timestamp = *tag_buf;

	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
	freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
	if (freq != ppdu_desc->channel) {
		soc = pdev->soc;
		ppdu_desc->channel = freq;
		pdev->operating_channel.freq = freq;
		if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
			pdev->operating_channel.num =
				soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
								     pdev->pdev_id,
								     freq);
		if (soc && soc->cdp_soc.ol_ops->freq_to_band)
			pdev->operating_channel.band =
				soc->cdp_soc.ol_ops->freq_to_band(soc->ctrl_psoc,
								  pdev->pdev_id,
								  freq);
	}

	ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);

	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
	ppdu_desc->phy_ppdu_tx_time_us =
		HTT_PPDU_STATS_COMMON_TLV_PHY_PPDU_TX_TIME_US_GET(*tag_buf);
	ppdu_desc->beam_change =
		HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
	ppdu_desc->doppler =
		HTT_PPDU_STATS_COMMON_TLV_DOPPLER_INDICATION_GET(*tag_buf);
	ppdu_desc->spatial_reuse =
		HTT_PPDU_STATS_COMMON_TLV_SPATIAL_REUSE_GET(*tag_buf);

	dp_tx_capture_htt_frame_counter(pdev, frame_type);
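	/*
	 * The PPDU start timestamp arrives as two 32-bit halves: the lower
	 * word (START_TSTMP_L32_US, read above) and the upper word
	 * (START_TSTMP_U32_US, read below), which are combined into a
	 * single 64-bit TSF value.
	 */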
	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
	ppdu_start_timestamp = *tag_buf;
	ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
					     HTT_SHIFT_UPPER_TIMESTAMP) &
					    HTT_MASK_UPPER_TIMESTAMP);

	ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
					ppdu_desc->tx_duration;
	/* Ack time stamp is same as end time stamp */
	ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;

	ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
	ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
	ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;

	tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(BSSCOLOR_OBSS_PSR);
	ppdu_desc->bss_color =
		HTT_PPDU_STATS_COMMON_TLV_BSS_COLOR_ID_GET(*tag_buf);
}
/**
 * dp_process_ppdu_stats_user_common_tlv() - Process ppdu_stats_user_common
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_peer *peer;
	struct dp_vdev *vdev;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	ppdu_desc->vdev_id =
		HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
		ppdu_desc->delayed_ba = 1;
	}

	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
			HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
			HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}
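	/*
	 * Note: multicast frames are not block-acknowledged, so the tried
	 * count above doubles as the MPDU count for the mcast case.
	 */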
	ppdu_user_desc->is_seq_num_valid =
		HTT_PPDU_STATS_USER_COMMON_TLV_IS_SQNUM_VALID_IN_BUFFER_GET(*tag_buf);
	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	if (ppdu_user_desc->delayed_ba)
		ppdu_user_desc->mpdu_success = 0;

	tag_buf += 3;

	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
		ppdu_user_desc->ppdu_cookie =
			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
		ppdu_user_desc->is_ppdu_cookie_valid = 1;
	}

	/* returning early would leave the remaining fields unpopulated */
	if (peer_id == DP_SCAN_PEER_ID) {
		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
		dp_vdev_unref_delete(pdev->soc, vdev, DP_MOD_ID_TX_PPDU_STATS);
	} else {
		peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!peer) {
			/*
			 * FW may send a peer_id that has already been removed
			 * on the host, e.g. on disassoc FW sends PPDU stats
			 * with the previously associated peer's peer_id even
			 * though that peer has been deleted.
			 */
			vdev = dp_vdev_get_ref_by_id(pdev->soc,
						     ppdu_desc->vdev_id,
						     DP_MOD_ID_TX_PPDU_STATS);
			if (!vdev)
				return;
			qdf_mem_copy(ppdu_user_desc->mac_addr,
				     vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE);
			dp_vdev_unref_delete(pdev->soc, vdev,
					     DP_MOD_ID_TX_PPDU_STATS);
			return;
		}
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
	}
}
/**
 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
 * @pdev: DP pdev handle
 * @tag_buf: T2H message buffer carrying the user rate TLV
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
				    uint32_t *tag_buf,
				    struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint8_t bw, ru_format;
	uint16_t ru_size;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	if (peer_id == DP_SCAN_PEER_ID) {
		vdev = dp_vdev_get_ref_by_id(pdev->soc, ppdu_desc->vdev_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		if (!vdev)
			return;
		dp_vdev_unref_delete(pdev->soc, vdev,
				     DP_MOD_ID_TX_PPDU_STATS);
	}
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
	ppdu_user_desc->mu_group_id =
		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
	ru_format = HTT_PPDU_STATS_USER_RATE_TLV_RU_FORMAT_GET(*tag_buf);

	tag_buf += 1;

	if (!ru_format) {
		/* ru_format = 0: ru_end, ru_start */
		ppdu_user_desc->ru_start =
			HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
		ppdu_user_desc->ru_tones =
			(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
			 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
	} else if (ru_format == 1) {
		/* ru_format = 1: ru_index, ru_size */
		ru_size = HTT_PPDU_STATS_USER_RATE_TLV_RU_SIZE_GET(*tag_buf);
		ppdu_user_desc->ru_tones =
			dp_mon_get_ru_width_from_ru_size(ru_size);
	} else {
		dp_mon_debug("Unsupported ru_format: %d rcvd", ru_format);
	}
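	/*
	 * Illustration for ru_format = 0: ru_start = 0 and ru_end = 25
	 * describe a 26-tone RU, since ru_tones = (25 - 0) + 1 = 26.
	 */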
	ppdu_desc->usr_ru_tones_sum += ppdu_user_desc->ru_tones;

	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	bw = HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf);

	/* Align bw value as per host data structures */
	if (bw == HTT_PPDU_STATS_BANDWIDTH_320MHZ)
		ppdu_user_desc->bw = bw - 3;
	else
		ppdu_user_desc->bw = bw - 2;

	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_desc->usr_nss_sum += ppdu_user_desc->nss;
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);

	tag_buf += 2;
	ppdu_user_desc->punc_pattern_bitmap =
		HTT_PPDU_STATS_USER_RATE_TLV_PUNC_PATTERN_BITMAP_GET(*tag_buf);
}
/**
 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv() - Process
 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
		(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
		HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);

	dp_process_ppdu_stats_update_failed_bitmap(pdev,
						   (void *)ppdu_user_desc,
						   ppdu_info->ppdu_id,
						   size);
}
/**
 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv() - Process
 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
		(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
		HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
	qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);

	dp_process_ppdu_stats_update_failed_bitmap(pdev,
						   (void *)ppdu_user_desc,
						   ppdu_info->ppdu_id,
						   size);
}
/**
 * dp_process_ppdu_stats_user_cmpltn_common_tlv() - Process
 * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint8_t bw_iter;
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);

	tag_buf++;
	if (qdf_likely(ppdu_user_desc->completion_status ==
		       HTT_PPDU_STATS_USER_STATUS_OK)) {
		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->usr_ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->ack_rssi_valid = 1;
	} else {
		ppdu_user_desc->ack_rssi_valid = 0;
	}

	tag_buf++;
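	/*
	 * FW reports tried and successful MPDU counts; the failed count is
	 * derived as the difference of the two.
	 */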
	ppdu_user_desc->mpdu_success =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
	ppdu_user_desc->mpdu_failed =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
		ppdu_user_desc->mpdu_success;

	tag_buf++;

	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
	ppdu_user_desc->short_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	ppdu_user_desc->retry_mpdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

	ppdu_desc->resp_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
	ppdu_desc->mprot_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
	ppdu_desc->rts_success =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
	ppdu_desc->rts_failure =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);

	ppdu_user_desc->pream_punct =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PREAM_PUNC_TX_GET(*tag_buf);

	ppdu_info->compltn_common_tlv++;

	/*
	 * A MU BAR may send a request to n users, but acks may be received
	 * from only m of them. To count how many users responded, a separate
	 * per-PPDU counter, bar_num_users, is incremented for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv received.
	 */
	ppdu_desc->bar_num_users++;

	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
		ppdu_user_desc->rssi_chain[bw_iter] =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
		tag_buf++;
	}

	ppdu_user_desc->sa_tx_antenna =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->sa_is_training =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
	if (ppdu_user_desc->sa_is_training) {
		ppdu_user_desc->sa_goodput =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
	}

	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
		ppdu_user_desc->sa_max_rates[bw_iter] =
			HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
	}

	tag_buf += CDP_NUM_SA_BW;
	ppdu_user_desc->current_rate_per =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
}
/**
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv() - Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
		     sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
	ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
}
/**
 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv() - Process
 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint8_t curr_user_index = 0;
	uint16_t peer_id;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;

	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
	qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
		     sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
	ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
}
/**
 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv() - Process
 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf += 2;
	peer_id =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);
	if (!ppdu_user_desc->ack_ba_tlv) {
		ppdu_user_desc->ack_ba_tlv = 1;
	} else {
		pdev->stats.ack_ba_comes_twice++;
		return;
	}

	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;
	/* Do not update ppdu_desc->tid from this TLV */
	ppdu_user_desc->num_mpdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	tag_buf++;
	ppdu_user_desc->start_seq =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
			*tag_buf);

	tag_buf++;
	ppdu_user_desc->success_bytes = *tag_buf;

	/* increase ack ba tlv counter on successful mpdu */
	if (ppdu_user_desc->num_mpdu)
		ppdu_info->ack_ba_tlv++;
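	/*
	 * If no BA bitmap TLV was received for this user (ba_size == 0),
	 * likely a normal ACK rather than a block ack, synthesize a
	 * single-entry bitmap anchored at the start sequence number.
	 */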
	if (ppdu_user_desc->ba_size == 0) {
		ppdu_user_desc->ba_seq_no = ppdu_user_desc->start_seq;
		ppdu_user_desc->ba_bitmap[0] = 1;
		ppdu_user_desc->ba_size = 1;
	}
}
/**
 * dp_process_ppdu_stats_user_common_array_tlv() - Process
 * htt_ppdu_stats_user_common_array_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_usr_common_array_tlv_v
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_stats_user_common_array_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint32_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct htt_tx_ppdu_stats_info *dp_stats_buf;
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	tag_buf++;
	dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
	tag_buf += 3;
	peer_id =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);

	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Invalid peer");
		return;
	}

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->tlv_bitmap |= (1 << tlv_type);

	ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
	ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;

	tag_buf++;
	ppdu_user_desc->success_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
	tag_buf++;
	ppdu_user_desc->failed_msdus =
		HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
}
/**
 * dp_process_ppdu_stats_user_compltn_flush_tlv() - Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
					     uint32_t *tag_buf,
					     struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint32_t peer_id;
	uint8_t tid;
	struct dp_peer *peer;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_peer *mon_peer = NULL;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);
	ppdu_desc->is_flush = 1;

	tag_buf++;
	ppdu_desc->drop_reason = *tag_buf;

	tag_buf++;
	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);

	tag_buf++;
	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);

	ppdu_desc->num_users = 1;
	ppdu_desc->user[0].peer_id = peer_id;
	ppdu_desc->user[0].tid = tid;

	ppdu_desc->queue_type =
		HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);

	peer = dp_peer_get_ref_by_id(pdev->soc, peer_id,
				     DP_MOD_ID_TX_PPDU_STATS);
	if (!peer)
		goto add_ppdu_to_sched_list;

	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
		mon_peer = peer->monitor_peer;
		DP_STATS_INC(mon_peer,
			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
			     ppdu_desc->num_msdu);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
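	/* The PPDU was flushed/dropped by FW: mark the descriptor done and
	 * move it from the pending list straight to the sched completion
	 * list.
	 */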
add_ppdu_to_sched_list:
	ppdu_info->done = 1;
	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	mon_pdev->list_depth--;
	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
			  ppdu_info_list_elem);
	mon_pdev->sched_comp_list_depth++;
}
/**
 * dp_process_ppdu_stats_sch_cmd_status_tlv() - Process the schedule command
 * status tlv; the TLV body itself is not parsed here
 * @pdev: DP PDEV handle
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void
dp_process_ppdu_stats_sch_cmd_status_tlv(struct dp_pdev *pdev,
					 struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct dp_peer *peer;
	uint8_t num_users;
	uint8_t i;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);

	num_users = ppdu_desc->bar_num_users;

	for (i = 0; i < num_users; i++) {
		if (ppdu_desc->user[i].user_pos == 0) {
			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
				/* update phy mode for bar frame */
				ppdu_desc->phy_mode =
					ppdu_desc->user[i].preamble;
				ppdu_desc->user[0].mcs = ppdu_desc->user[i].mcs;
				break;
			}
			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
				ppdu_desc->frame_ctrl =
					ppdu_desc->user[i].frame_ctrl;
				break;
			}
		}
	}
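	/*
	 * For BAR/CTRL frames, the PPDU-level phy_mode/frame_ctrl above are
	 * taken from the user at position 0.
	 */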
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
	    ppdu_desc->delayed_ba) {
		qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);

		for (i = 0; i < ppdu_desc->num_users; i++) {
			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
			uint64_t start_tsf;
			uint64_t end_tsf;
			uint32_t ppdu_id;
			struct dp_mon_peer *mon_peer;

			ppdu_id = ppdu_desc->ppdu_id;
			peer = dp_peer_get_ref_by_id
				(pdev->soc, ppdu_desc->user[i].peer_id,
				 DP_MOD_ID_TX_PPDU_STATS);
			/*
			 * This check is to make sure the peer was not deleted
			 * after processing the TLVs.
			 */
			if (!peer || !peer->monitor_peer)
				continue;

			mon_peer = peer->monitor_peer;
			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
			start_tsf = ppdu_desc->ppdu_start_timestamp;
			end_tsf = ppdu_desc->ppdu_end_timestamp;
			/*
			 * save delayed ba user info
			 */
			if (ppdu_desc->user[i].delayed_ba) {
				dp_peer_copy_delay_stats(peer,
							 &ppdu_desc->user[i],
							 ppdu_id);
				mon_peer->last_delayed_ba_ppduid = ppdu_id;
				delay_ppdu->ppdu_start_timestamp = start_tsf;
				delay_ppdu->ppdu_end_timestamp = end_tsf;
			}
			ppdu_desc->user[i].peer_last_delayed_ba =
				mon_peer->last_delayed_ba;

			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);

			if (ppdu_desc->user[i].delayed_ba &&
			    !ppdu_desc->user[i].debug_copied) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO_MED,
					  "%s: %d ppdu_id[%d] bar_ppdu_id[%d] num_users[%d] usr[%d] htt_frame_type[%d]\n",
					  __func__, __LINE__,
					  ppdu_desc->ppdu_id,
					  ppdu_desc->bar_ppdu_id,
					  ppdu_desc->num_users,
					  i,
					  ppdu_desc->htt_frame_type);
			}
		}
	}

	/*
	 * When the frame type is BAR and STATS_COMMON_TLV is set,
	 * copy the stored peer delayed info to the BAR status
	 */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
			struct cdp_delayed_tx_completion_ppdu_user *delay_ppdu;
			uint64_t start_tsf;
			uint64_t end_tsf;
			struct dp_mon_peer *mon_peer;

			peer = dp_peer_get_ref_by_id
				(pdev->soc,
				 ppdu_desc->user[i].peer_id,
				 DP_MOD_ID_TX_PPDU_STATS);
			/*
			 * This check is to make sure the peer was not deleted
			 * after processing the TLVs.
			 */
			if (!peer || !peer->monitor_peer)
				continue;

			mon_peer = peer->monitor_peer;
			if (ppdu_desc->user[i].completion_status !=
			    HTT_PPDU_STATS_USER_STATUS_OK) {
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_PPDU_STATS);
				continue;
			}

			delay_ppdu = &mon_peer->delayed_ba_ppdu_stats;
			start_tsf = delay_ppdu->ppdu_start_timestamp;
			end_tsf = delay_ppdu->ppdu_end_timestamp;

			if (mon_peer->last_delayed_ba) {
				dp_peer_copy_stats_to_bar(peer,
							  &ppdu_desc->user[i]);
				ppdu_desc->ppdu_id =
					mon_peer->last_delayed_ba_ppduid;
				ppdu_desc->ppdu_start_timestamp = start_tsf;
				ppdu_desc->ppdu_end_timestamp = end_tsf;
			}
			ppdu_desc->user[i].peer_last_delayed_ba =
				mon_peer->last_delayed_ba;

			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
		}
	}

	TAILQ_REMOVE(&mon_pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	mon_pdev->list_depth--;
	TAILQ_INSERT_TAIL(&mon_pdev->sched_comp_ppdu_list, ppdu_info,
			  ppdu_info_list_elem);
	mon_pdev->sched_comp_list_depth++;
}
/**
 * dp_validate_fix_ppdu_tlv() - Validate the length of a PPDU stats TLV
 *
 * If the TLV length sent as part of the PPDU TLV is less than the expected
 * size, i.e. the size of the corresponding data structure, pad the remaining
 * bytes with zeros and continue processing the TLVs.
 *
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_expected_size: Expected size of Tag
 * @tlv_len: TLV length received from FW
 *
 * Return: Pointer to updated TLV
 */
static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
						 uint32_t *tag_buf,
						 uint16_t tlv_expected_size,
						 uint16_t tlv_len)
{
	uint32_t *tlv_desc = tag_buf;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	qdf_assert_always(tlv_len != 0);

	if (tlv_len < tlv_expected_size) {
		qdf_mem_zero(mon_pdev->ppdu_tlv_buf, tlv_expected_size);
		qdf_mem_copy(mon_pdev->ppdu_tlv_buf, tag_buf, tlv_len);
		tlv_desc = mon_pdev->ppdu_tlv_buf;
	}

	return tlv_desc;
}
/**
 * dp_process_ppdu_tag() - Process the PPDU TLVs
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer
 * @tlv_len: length of tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev,
				uint32_t *tag_buf,
				uint32_t tlv_len,
				struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint16_t tlv_expected_size;
	uint32_t *tlv_desc;

	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
						      ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
						    ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
							     ppdu_info);
		break;
	case HTT_PPDU_STATS_SCH_CMD_STATUS_TLV:
		dp_process_ppdu_stats_sch_cmd_status_tlv(pdev, ppdu_info);
		break;
	default:
		break;
	}
}
#ifdef WLAN_TELEMETRY_STATS_SUPPORT
static inline
void dp_ppdu_desc_user_airtime_consumption_update(
			struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *user)
{
	struct dp_mon_peer *mon_peer = NULL;

	mon_peer = peer->monitor_peer;
	if (qdf_unlikely(!mon_peer))
		return;

	DP_STATS_INC(mon_peer, airtime_consumption.consumption,
		     user->phy_tx_time_us);
}
#else
static inline
void dp_ppdu_desc_user_airtime_consumption_update(
			struct dp_peer *peer,
			struct cdp_tx_completion_ppdu_user *user)
{ }
#endif

#if defined(WLAN_ATF_ENABLE) || defined(WLAN_TELEMETRY_STATS_SUPPORT)
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct dp_peer *peer,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
	uint32_t nss_ru_width_sum = 0;
	struct dp_mon_peer *mon_peer = NULL;

	if (!pdev || !ppdu_desc || !user || !peer)
		return;

	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_DATA)
		return;

	mon_peer = peer->monitor_peer;
	if (qdf_unlikely(!mon_peer))
		return;

	nss_ru_width_sum = ppdu_desc->usr_nss_sum * ppdu_desc->usr_ru_tones_sum;
	if (!nss_ru_width_sum)
		nss_ru_width_sum = 1;

	/*
	 * For a SU PPDU the phy Tx time belongs entirely to the single user.
	 * For MU-MIMO/OFDMA the phy Tx time is apportioned per user:
	 * user phy tx time =
	 *	entire PPDU duration * MU ratio * OFDMA ratio
	 * MU ratio = usr_nss / sum_of_nss_of_all_users
	 * OFDMA ratio = usr_ru_width / sum_of_ru_width_of_all_users
	 * usr_ru_width = ru_end - ru_start + 1
	 */
	if (ppdu_desc->htt_frame_type == HTT_STATS_FTYPE_TIDQ_DATA_SU) {
		user->phy_tx_time_us = ppdu_desc->phy_ppdu_tx_time_us;
	} else {
		user->phy_tx_time_us = (ppdu_desc->phy_ppdu_tx_time_us *
				user->nss * user->ru_tones) / nss_ru_width_sum;
	}
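	/*
	 * Worked example (illustrative numbers): a 100 us MU PPDU with two
	 * users, user0 (nss = 2, ru_tones = 106) and user1 (nss = 1,
	 * ru_tones = 52), gives usr_nss_sum = 3, usr_ru_tones_sum = 158 and
	 * nss_ru_width_sum = 474, so user0 is charged
	 * 100 * 2 * 106 / 474 = ~44 us of phy tx time.
	 */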
	dp_ppdu_desc_user_airtime_consumption_update(peer, user);
}
#else
static void
dp_ppdu_desc_user_phy_tx_time_update(struct dp_pdev *pdev,
				     struct dp_peer *peer,
				     struct cdp_tx_completion_ppdu *ppdu_desc,
				     struct cdp_tx_completion_ppdu_user *user)
{
}
#endif
/**
 * dp_ppdu_desc_user_stats_update() - Update per-user TX stats from a PPDU
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Return: void
 */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	uint32_t tlv_bitmap_expected;
	uint32_t tlv_bitmap_default;
	uint16_t i;
	uint32_t num_users;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);

	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_BAR)
		ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
	    mon_pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	tlv_bitmap_default = tlv_bitmap_expected;

	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		num_users = ppdu_desc->bar_num_users;
		ppdu_desc->num_users = ppdu_desc->bar_num_users;
	} else {
		num_users = ppdu_desc->num_users;
	}
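	/* Aggregate per-user MPDU/MSDU counts into the PPDU-level totals
	 * while walking the users below.
	 */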
	qdf_assert_always(ppdu_desc->num_users <= ppdu_desc->max_users);
	for (i = 0; i < num_users; i++) {
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		peer = dp_peer_get_ref_by_id(pdev->soc,
					     ppdu_desc->user[i].peer_id,
					     DP_MOD_ID_TX_PPDU_STATS);
		/*
		 * This check is to make sure the peer was not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		ppdu_desc->user[i].is_bss_peer = peer->bss_peer;
		/*
		 * Different frame types (DATA, BAR, CTRL) expect different
		 * TLV bitmaps. Apart from the ACK_BA_STATUS TLV, all TLVs
		 * arrive in order from FW. The ACK_BA_STATUS TLV comes from
		 * hardware and is asynchronous, so we need some TLV to
		 * confirm that all TLVs for a PPDU have been received; hence
		 * we depend on both SCHED_CMD_STATUS_TLV and
		 * ACK_BA_STATUS_TLV. For a failed packet no
		 * ACK_BA_STATUS_TLV is received.
		 */
		if (!(ppdu_info->tlv_bitmap &
		      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)) ||
		    (!(ppdu_info->tlv_bitmap &
		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
		     (ppdu_desc->user[i].completion_status ==
		      HTT_PPDU_STATS_USER_STATUS_OK))) {
			dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
			continue;
		}

		/*
		 * Update tx stats for data frames having a Qos as well as a
		 * non-Qos data tid
		 */
		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
		     (ppdu_desc->htt_frame_type ==
		      HTT_STATS_FTYPE_SGEN_QOS_NULL) ||
		     ((ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) &&
		      (ppdu_desc->num_mpdu > 1))) &&
		    (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {
			dp_tx_stats_update(pdev, peer,
					   &ppdu_desc->user[i],
					   ppdu_desc->ack_rssi);
		}

		dp_ppdu_desc_user_phy_tx_time_update(pdev, peer, ppdu_desc,
						     &ppdu_desc->user[i]);

		dp_peer_unref_delete(peer, DP_MOD_ID_TX_PPDU_STATS);
		tlv_bitmap_expected = tlv_bitmap_default;
	}
}
#if !defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(QCA_MONITOR_2_0_SUPPORT)
/**
 * dp_tx_ppdu_desc_notify() - Notify the upper layer about the PPDU via WDI
 *
 * @pdev: Datapath pdev handle
 * @nbuf: Buffer to be delivered to upper layer
 *
 * Return: void
 */
static void dp_tx_ppdu_desc_notify(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_ppdu_desc_notify)
		mon_ops->mon_ppdu_desc_notify(pdev, nbuf);
	else
		qdf_nbuf_free(nbuf);
}

void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
			  struct ppdu_info *ppdu_info)
{
	struct ppdu_info *s_ppdu_info = NULL;
	struct ppdu_info *ppdu_info_next = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	qdf_nbuf_t nbuf;
	uint32_t time_delta = 0;
	bool starved = 0;
	bool matched = 0;
	bool recv_ack_ba_done = 0;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if (ppdu_info->tlv_bitmap &
	    (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
	    ppdu_info->done)
		recv_ack_ba_done = 1;

	mon_pdev->last_sched_cmdid = ppdu_info->sched_cmdid;

	s_ppdu_info = TAILQ_FIRST(&mon_pdev->sched_comp_ppdu_list);

	TAILQ_FOREACH_SAFE(s_ppdu_info, &mon_pdev->sched_comp_ppdu_list,
			   ppdu_info_list_elem, ppdu_info_next) {
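		/* tsf_l32 carries only the lower 32 bits of the TSF, so
		 * account for wraparound when computing the delta.
		 */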
		if (s_ppdu_info->tsf_l32 > ppdu_info->tsf_l32)
			time_delta = (MAX_TSF_32 - s_ppdu_info->tsf_l32) +
					ppdu_info->tsf_l32;
		else
			time_delta = ppdu_info->tsf_l32 - s_ppdu_info->tsf_l32;

		if (!s_ppdu_info->done && !recv_ack_ba_done) {
			if (time_delta < MAX_SCHED_STARVE) {
				dp_mon_info("pdev[%d] ppdu_id[%d] sched_cmdid[%d] TLV_B[0x%x] TSF[%u] D[%d]",
					    pdev->pdev_id,
					    s_ppdu_info->ppdu_id,
					    s_ppdu_info->sched_cmdid,
					    s_ppdu_info->tlv_bitmap,
					    s_ppdu_info->tsf_l32,
					    s_ppdu_info->done);
				break;
			}
			starved = 1;
		}

		mon_pdev->delivered_sched_cmdid = s_ppdu_info->sched_cmdid;
		TAILQ_REMOVE(&mon_pdev->sched_comp_ppdu_list, s_ppdu_info,
			     ppdu_info_list_elem);
		mon_pdev->sched_comp_list_depth--;

		nbuf = s_ppdu_info->nbuf;
		qdf_assert_always(nbuf);
		ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(nbuf);
		ppdu_desc->tlv_bitmap = s_ppdu_info->tlv_bitmap;

		if (starved) {
			dp_mon_info("ppdu starved fc[0x%x] h_ftype[%d] tlv_bitmap[0x%x] cs[%d]\n",
				    ppdu_desc->frame_ctrl,
				    ppdu_desc->htt_frame_type,
				    ppdu_desc->tlv_bitmap,
				    ppdu_desc->user[0].completion_status);
			starved = 0;
		}

		if (ppdu_info->ppdu_id == s_ppdu_info->ppdu_id &&
		    ppdu_info->sched_cmdid == s_ppdu_info->sched_cmdid)
			matched = 1;

		dp_ppdu_desc_user_stats_update(pdev, s_ppdu_info);

		qdf_mem_free(s_ppdu_info);

		dp_tx_ppdu_desc_notify(pdev, nbuf);

		if (matched)
			break;
	}
}
#endif
/**
 * dp_tx_ppdu_desc_deliver() - Deliver a PPDU desc to the upper layer
 *
 * @pdev: Datapath pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Return: void
 */
static void dp_tx_ppdu_desc_deliver(struct dp_pdev *pdev,
				    struct ppdu_info *ppdu_info)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_ppdu_desc_deliver) {
		mon_ops->mon_ppdu_desc_deliver(pdev, ppdu_info);
	} else {
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		qdf_mem_free(ppdu_info);
	}
}
/**
 * dp_get_ppdu_desc() - Allocate a new PPDU status descriptor for a new
 * ppdu id
 * @pdev: DP pdev handle
 * @ppdu_id: PPDU unique identifier
 * @tlv_type: TLV type received
 * @tsf_l32: timestamp received along with ppdu stats indication header
 * @max_users: Maximum user for that particular ppdu
 *
 * Return: ppdu_info per ppdu tlv structure
 */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
				   uint8_t tlv_type, uint32_t tsf_l32,
				   uint8_t max_users)
{
	struct ppdu_info *ppdu_info = NULL;
	struct ppdu_info *s_ppdu_info = NULL;
	struct ppdu_info *ppdu_info_next = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	uint32_t size = 0;
	struct cdp_tx_completion_ppdu *tmp_ppdu_desc = NULL;
	struct cdp_tx_completion_ppdu_user *tmp_user;
	uint32_t time_delta;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	/*
	 * Check whether a node for this ppdu_id already exists
	 */
	TAILQ_FOREACH_SAFE(ppdu_info, &mon_pdev->ppdu_info_list,
			   ppdu_info_list_elem, ppdu_info_next) {
		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			if (ppdu_info->tsf_l32 > tsf_l32)
				time_delta = (MAX_TSF_32 -
					      ppdu_info->tsf_l32) + tsf_l32;
			else
				time_delta = tsf_l32 - ppdu_info->tsf_l32;
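			/*
			 * Same ppdu_id seen again after more than
			 * WRAP_DROP_TSF_DELTA: treat the queued descriptor
			 * as a stale one from a wrapped ppdu_id space and
			 * drop it.
			 */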
			if (time_delta > WRAP_DROP_TSF_DELTA) {
				TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
					     ppdu_info, ppdu_info_list_elem);
				mon_pdev->list_depth--;
				pdev->stats.ppdu_wrap_drop++;
				tmp_ppdu_desc =
					(struct cdp_tx_completion_ppdu *)
						qdf_nbuf_data(ppdu_info->nbuf);
				tmp_user = &tmp_ppdu_desc->user[0];
				dp_htt_tx_stats_info("S_PID [%d] S_TSF[%u] TLV_BITMAP[0x%x] [CMPLTN - %d ACK_BA - %d] CS[%d] - R_PID[%d] R_TSF[%u] R_TLV_TAG[0x%x]\n",
						     ppdu_info->ppdu_id,
						     ppdu_info->tsf_l32,
						     ppdu_info->tlv_bitmap,
						     tmp_user->completion_status,
						     ppdu_info->compltn_common_tlv,
						     ppdu_info->ack_ba_tlv,
						     ppdu_id, tsf_l32,
						     tlv_type);
				qdf_nbuf_free(ppdu_info->nbuf);
				ppdu_info->nbuf = NULL;
				qdf_mem_free(ppdu_info);
			} else {
				break;
			}
		}
	}

	/*
	 * If this is the ack ba tlv and it is not present in the ppdu info
	 * list, check for it in the sched completion ppdu list
	 */
	if (!ppdu_info &&
	    tlv_type == HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) {
		TAILQ_FOREACH(s_ppdu_info,
			      &mon_pdev->sched_comp_ppdu_list,
			      ppdu_info_list_elem) {
			if (s_ppdu_info && (s_ppdu_info->ppdu_id == ppdu_id)) {
				if (s_ppdu_info->tsf_l32 > tsf_l32)
					time_delta = (MAX_TSF_32 -
						      s_ppdu_info->tsf_l32) +
							tsf_l32;
				else
					time_delta = tsf_l32 -
						s_ppdu_info->tsf_l32;
				if (time_delta < WRAP_DROP_TSF_DELTA) {
					ppdu_info = s_ppdu_info;
					break;
				}
			} else {
				/*
				 * ACK BA STATUS TLVs arrive in sequential
				 * order. If we receive the ack ba status tlv
				 * for a second ppdu while the first ppdu is
				 * still waiting for its ACK BA STATUS TLV,
				 * then per FW that TLV will not arrive later,
				 * so mark the first ppdu info as done.
				 */
				if (s_ppdu_info)
					s_ppdu_info->done = 1;
			}
		}
	}

	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/*
			 * If we get a tlv_type that has already been
			 * processed for this ppdu, it means we got a new
			 * ppdu with the same ppdu id; hence flush the older
			 * ppdu. For MU-MIMO and OFDMA a PPDU carries multiple
			 * users with the same tlv types, and the tlv bitmap
			 * is used to tell SU apart from MU-MIMO/OFDMA.
			 */
			if (!(ppdu_info->tlv_bitmap &
			      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;

			ppdu_desc = (struct cdp_tx_completion_ppdu *)
					qdf_nbuf_data(ppdu_info->nbuf);

			/*
			 * Apart from the ACK BA STATUS TLV, all TLVs come in
			 * order, so if the tlv type is not ACK BA STATUS TLV
			 * we can deliver the ppdu_info.
			 */
			if ((tlv_type ==
			     HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV) &&
			    ((ppdu_desc->htt_frame_type ==
			      HTT_STATS_FTYPE_SGEN_MU_BAR) ||
			     (ppdu_desc->htt_frame_type ==
			      HTT_STATS_FTYPE_SGEN_BE_MU_BAR)))
				return ppdu_info;

			dp_tx_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			return ppdu_info;
		}
	}

	/*
	 * Flush the head ppdu descriptor if the ppdu desc list reaches the
	 * max threshold
	 */
	if (mon_pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&mon_pdev->ppdu_info_list);
		TAILQ_REMOVE(&mon_pdev->ppdu_info_list,
			     ppdu_info, ppdu_info_list_elem);
		mon_pdev->list_depth--;
		pdev->stats.ppdu_drop++;
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		qdf_mem_free(ppdu_info);
	}

	size = sizeof(struct cdp_tx_completion_ppdu) +
		(max_users * sizeof(struct cdp_tx_completion_ppdu_user));

	/*
	 * Allocate new ppdu_info node
  3945. */
  3946. ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
  3947. if (!ppdu_info)
  3948. return NULL;
  3949. ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev, size,
  3950. 0, 4, TRUE);
  3951. if (!ppdu_info->nbuf) {
  3952. qdf_mem_free(ppdu_info);
  3953. return NULL;
  3954. }
  3955. ppdu_info->ppdu_desc =
  3956. (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
  3957. qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf), size);
  3958. if (!qdf_nbuf_put_tail(ppdu_info->nbuf, size)) {
  3959. dp_mon_err("No tailroom for HTT PPDU");
  3960. qdf_nbuf_free(ppdu_info->nbuf);
  3961. ppdu_info->nbuf = NULL;
  3962. ppdu_info->last_user = 0;
  3963. qdf_mem_free(ppdu_info);
  3964. return NULL;
  3965. }
  3966. ppdu_info->ppdu_desc->max_users = max_users;
  3967. ppdu_info->tsf_l32 = tsf_l32;
  3968. /**
  3969. * No lock is needed because all PPDU TLVs are processed in
  3970. * same context and this list is updated in same context
  3971. */
  3972. TAILQ_INSERT_TAIL(&mon_pdev->ppdu_info_list, ppdu_info,
  3973. ppdu_info_list_elem);
  3974. mon_pdev->list_depth++;
  3975. return ppdu_info;
  3976. }
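
/*
 * Illustrative sketch (not part of the driver): the wraparound-aware
 * 32-bit TSF delta used above, factored into a standalone helper. The
 * helper name tsf32_elapsed is hypothetical; MAX_TSF_32 and
 * WRAP_DROP_TSF_DELTA are the driver's own constants, and the branches
 * mirror dp_get_ppdu_desc() exactly:
 *
 *	static inline uint32_t tsf32_elapsed(uint32_t old_tsf,
 *					     uint32_t new_tsf)
 *	{
 *		if (old_tsf > new_tsf)	// TSF wrapped between TLVs
 *			return (MAX_TSF_32 - old_tsf) + new_tsf;
 *		return new_tsf - old_tsf;
 *	}
 *
 * A stale node is then one where tsf32_elapsed(node->tsf_l32, tsf_l32)
 * exceeds WRAP_DROP_TSF_DELTA.
 */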
/**
 * dp_htt_process_tlv() - Function to process each PPDU TLV
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Return: ppdu_info per ppdu tlv structure
 */
static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
					    qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	uint8_t max_users = CDP_MU_MAX_USERS;
	uint32_t tsf_l32;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	uint32_t *msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);

	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);

	msg_word = msg_word + 1;
	tsf_l32 = (uint32_t)(*msg_word);

	msg_word = msg_word + 2;

	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;

		/*
		 * No separate ppdu descriptor is allocated for the MGMT
		 * payload TLV, since it is sent as a separate WDI
		 * indication and does not carry any ppdu information.
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			mon_pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			mon_pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			mon_pdev->mgmtctrl_frm_info.mgmt_buf_len =
				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
				(*(msg_word + 1));
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		/*
		 * Retrieve max_users from the TLV if it is USERS_INFO;
		 * use 1 for COMPLTN_FLUSH; otherwise keep CDP_MU_MAX_USERS.
		 */
		if (tlv_type == HTT_PPDU_STATS_USERS_INFO_TLV) {
			max_users =
				HTT_PPDU_STATS_USERS_INFO_TLV_MAX_USERS_GET(*(msg_word + 1));
		} else if (tlv_type == HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV) {
			max_users = 1;
		}

		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type,
					     tsf_l32, max_users);
		if (!ppdu_info)
			return NULL;

		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/*
		 * Increment the pdev level tlv count to monitor
		 * missing TLVs.
		 */
		mon_pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = mon_pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	if (!ppdu_info)
		return NULL;

	mon_pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	if (mon_pdev->tx_sniffer_enable || mon_pdev->mcopy_mode ||
	    mon_pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	ppdu_desc = ppdu_info->ppdu_desc;
	if (!ppdu_desc)
		return NULL;

	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
	    HTT_PPDU_STATS_USER_STATUS_OK) {
		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
	}

	/*
	 * For frame types DATA and BAR, stats are updated per MSDU.
	 * Successful msdu and mpdu counts are populated from the ACK BA
	 * STATUS TLV, which arrives out of order; the successful mpdu
	 * count is also populated from the COMPLTN COMMON TLV, which
	 * arrives in order. For every ppdu_info we store the successful
	 * mpdu count from both TLVs and compare them before delivering,
	 * to make sure the ACK BA STATUS TLV was actually received. Some
	 * self-generated frames never get an ACK BA STATUS TLV, so there
	 * is no need to wait for it in those cases.
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * Most of the time a BAR frame will have a duplicate
		 * ACK BA STATUS TLV.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
		    (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv))
			return NULL;

		/*
		 * For data frames, the COMPLTN COMMON TLV count should
		 * match the ACK BA STATUS TLV count and the completion
		 * status. Only the first user is checked because, for
		 * OFDMA, completion is seen at the next MU BAR frame,
		 * while for MIMO only the first user's completion is
		 * immediate.
		 */
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
		    (ppdu_desc->user[0].completion_status == 0 &&
		     (ppdu_info->compltn_common_tlv != ppdu_info->ack_ba_tlv)))
			return NULL;
	}

	/*
	 * Once all the TLVs for a given PPDU have been processed, return
	 * the PPDU status to be delivered to the higher layer.
	 * tlv_bitmap_expected can differ per frame type, but the SCHED CMD
	 * STATUS TLV is the last TLV from the FW for a ppdu; apart from
	 * the ACK BA TLV, the FW sends the other TLVs in sequential order,
	 * and the flush TLV comes separately.
	 */
	if ((ppdu_info->tlv_bitmap != 0 &&
	     (ppdu_info->tlv_bitmap &
	      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV))) ||
	    (ppdu_info->tlv_bitmap &
	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV))) {
		ppdu_info->done = 1;
		return ppdu_info;
	}

	return NULL;
}
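
/*
 * Illustrative sketch (not part of the driver): the TLV walk above in
 * its generic form. Each TLV starts with a header word holding the tag
 * and body length; the cursor advances by the body length plus
 * HTT_TLV_HDR_LEN until the payload is exhausted. The names
 * payload_start, remaining and handle_tlv are hypothetical:
 *
 *	uint32_t *tlv_hdr = payload_start;
 *
 *	while (remaining > 0) {
 *		uint8_t tag = HTT_STATS_TLV_TAG_GET(*tlv_hdr);
 *		uint32_t len = HTT_STATS_TLV_LENGTH_GET(*tlv_hdr);
 *
 *		if (len == 0)			// malformed; stop walking
 *			break;
 *		len += HTT_TLV_HDR_LEN;		// include the header word
 *		handle_tlv(tag, tlv_hdr, len);
 *		tlv_hdr = (uint32_t *)((uint8_t *)tlv_hdr + len);
 *		remaining -= len;
 *	}
 */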
#endif /* QCA_ENHANCED_STATS_SUPPORT */

#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_tx_ppdu_stats_feat_enable_check() - Check if feature(s) is enabled to
 *					  consume stats received from FW via HTT
 * @pdev: Datapath pdev handle
 *
 * Return: true if at least one consuming feature is enabled, else false
 */
static bool dp_tx_ppdu_stats_feat_enable_check(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_ppdu_stats_feat_enable_check)
		return mon_ops->mon_ppdu_stats_feat_enable_check(pdev);
	else
		return false;
}
#endif
#if defined(WDI_EVENT_ENABLE)
#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
 * @soc: DP SOC handle
 * @pdev_id: pdev id
 * @htt_t2h_msg: HTT message nbuf
 *
 * Return: true if the buffer should be freed by the caller
 */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	struct dp_pdev *pdev;
	struct ppdu_info *ppdu_info = NULL;
	bool free_buf = true;
	struct dp_mon_pdev *mon_pdev;

	if (pdev_id >= MAX_PDEV_CNT)
		return true;

	pdev = soc->pdev_list[pdev_id];
	if (!pdev)
		return true;

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev)
		return true;

	if (!dp_tx_ppdu_stats_feat_enable_check(pdev))
		return free_buf;

	qdf_spin_lock_bh(&mon_pdev->ppdu_stats_lock);
	ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);

	if (mon_pdev->mgmtctrl_frm_info.mgmt_buf) {
		if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
		    (pdev, htt_t2h_msg, mon_pdev->mgmtctrl_frm_info.ppdu_id) !=
		    QDF_STATUS_SUCCESS)
			free_buf = false;
	}

	if (ppdu_info)
		dp_tx_ppdu_desc_deliver(pdev, ppdu_info);

	mon_pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
	mon_pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
	mon_pdev->mgmtctrl_frm_info.ppdu_id = 0;

	qdf_spin_unlock_bh(&mon_pdev->ppdu_stats_lock);

	return free_buf;
}
#elif (!defined(REMOVE_PKT_LOG))
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
				       uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#endif
#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
/**
 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload
 * @htt_t2h_msg: HTT msg nbuf
 *
 * Return: True if buffer should be freed by caller.
 */
bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
			  uint32_t *msg_word,
			  qdf_nbuf_t htt_t2h_msg)
{
	u_int8_t pdev_id;
	u_int8_t target_pdev_id;
	bool free_buf;

	target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
							 target_pdev_id);
	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
			     htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
			     pdev_id);

	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
					      htt_t2h_msg);

	return free_buf;
}
#endif
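
/*
 * Illustrative sketch (not part of the driver): what the return value
 * implies for a caller dispatching this T2H indication. If the handler
 * returns true, ownership of the nbuf stays with the caller and it must
 * be freed; if false, the handler has retained the buffer (e.g. via the
 * MGMT payload path above):
 *
 *	if (dp_ppdu_stats_ind_handler(htt_soc, msg_word, htt_t2h_msg))
 *		qdf_nbuf_free(htt_t2h_msg);
 */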
void
dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
{
	pdev->monitor_pdev->rx_mon_recv_status.bsscolor = bsscolor;
}

bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_data_filter & FILTER_DATA_UCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_UCAST))
		return true;

	return false;
}

bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_data_filter & FILTER_DATA_MCAST) ||
	    (mon_pdev->mo_data_filter & FILTER_DATA_MCAST))
		return true;

	return false;
}

bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;

	if ((mon_pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
	    (mon_pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
		if ((mon_pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
		    (mon_pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
			return true;
		}
	}

	return false;
}
QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
{
	int target_type;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct cdp_mon_ops *cdp_ops;

	cdp_ops = dp_mon_cdp_ops_get(soc);
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		/* do nothing */
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) {
			if (cdp_ops && cdp_ops->config_full_mon_mode)
				cdp_ops->config_full_mon_mode((struct cdp_soc_t *)soc, 1);
		}
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		break;
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA5332:
		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
							   MON_BUF_MIN_ENTRIES);
		mon_soc->hw_nac_monitor_support = 1;
		mon_soc->monitor_mode_v2 = 1;
		break;
	default:
		dp_mon_info("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	dp_mon_info("hw_nac_monitor_support = %d",
		    mon_soc->hw_nac_monitor_support);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_mon_pdev_per_target_config() - Target specific monitor pdev configuration
 * @pdev: PDEV handle [Should be valid]
 *
 * Return: None
 */
static void dp_mon_pdev_per_target_config(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	int target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
		mon_pdev->is_tlv_hdr_64_bit = true;
		break;
	default:
		mon_pdev->is_tlv_hdr_64_bit = false;
		break;
	}
}
QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops;
	qdf_size_t mon_pdev_context_size;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		goto fail0;
	}

	soc = pdev->soc;

	mon_pdev_context_size = soc->arch_ops.txrx_get_mon_context_size(DP_CONTEXT_TYPE_MON_PDEV);
	mon_pdev = dp_context_alloc_mem(soc, DP_MON_PDEV_TYPE, mon_pdev_context_size);
	if (!mon_pdev) {
		dp_mon_err("%pK: MONITOR pdev allocation failed", pdev);
		goto fail0;
	}

	pdev->monitor_pdev = mon_pdev;
	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("%pK: Invalid monitor ops", pdev);
		goto fail1;
	}

	if (mon_ops->mon_pdev_alloc) {
		if (mon_ops->mon_pdev_alloc(pdev)) {
			dp_mon_err("%pK: MONITOR pdev alloc failed", pdev);
			goto fail1;
		}
	}

	if (mon_ops->mon_rings_alloc) {
		if (mon_ops->mon_rings_alloc(pdev)) {
			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
			goto fail2;
		}
	}

	/* Rx monitor mode specific init */
	if (mon_ops->rx_mon_desc_pool_alloc) {
		if (mon_ops->rx_mon_desc_pool_alloc(pdev)) {
			dp_mon_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
			goto fail3;
		}
	}

	pdev->monitor_pdev = mon_pdev;
	dp_mon_pdev_per_target_config(pdev);

	return QDF_STATUS_SUCCESS;
fail3:
	if (mon_ops->mon_rings_free)
		mon_ops->mon_rings_free(pdev);
fail2:
	if (mon_ops->mon_pdev_free)
		mon_ops->mon_pdev_free(pdev);
fail1:
	pdev->monitor_pdev = NULL;
	qdf_mem_free(mon_pdev);
fail0:
	return QDF_STATUS_E_NOMEM;
}
QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops = NULL;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_pdev = pdev->monitor_pdev;
	if (!mon_pdev) {
		dp_mon_err("Monitor pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (mon_ops->rx_mon_desc_pool_free)
		mon_ops->rx_mon_desc_pool_free(pdev);
	if (mon_ops->mon_rings_free)
		mon_ops->mon_rings_free(pdev);
	if (mon_ops->mon_pdev_free)
		mon_ops->mon_pdev_free(pdev);

	qdf_mem_free(mon_pdev);
	pdev->monitor_pdev = NULL;
	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc;
	struct dp_mon_pdev *mon_pdev;
	struct dp_mon_ops *mon_ops = NULL;

	if (!pdev) {
		dp_mon_err("pdev is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	soc = pdev->soc;
	mon_pdev = pdev->monitor_pdev;

	mon_pdev->invalid_mon_peer = qdf_mem_malloc(sizeof(struct dp_mon_peer));
	if (!mon_pdev->invalid_mon_peer) {
		dp_mon_err("%pK: Memory allocation failed for invalid "
			   "monitor peer", pdev);
		return QDF_STATUS_E_NOMEM;
	}

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		goto fail0;
	}

	mon_pdev->filter = dp_mon_filter_alloc(mon_pdev);
	if (!mon_pdev->filter) {
		dp_mon_err("%pK: Memory allocation failed for monitor filter",
			   pdev);
		goto fail0;
	}

	if (mon_ops->tx_mon_filter_alloc) {
		if (mon_ops->tx_mon_filter_alloc(pdev)) {
			dp_mon_err("%pK: Memory allocation failed for tx monitor "
				   "filter", pdev);
			goto fail1;
		}
	}

	qdf_spinlock_create(&mon_pdev->ppdu_stats_lock);
	qdf_spinlock_create(&mon_pdev->neighbour_peer_mutex);
	mon_pdev->monitor_configured = false;
	mon_pdev->mon_chan_band = REG_BAND_UNKNOWN;

	TAILQ_INIT(&mon_pdev->neighbour_peers_list);
	mon_pdev->neighbour_peers_added = false;
	mon_pdev->monitor_configured = false;

	/* Monitor filter init */
	mon_pdev->mon_filter_mode = MON_FILTER_ALL;
	mon_pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->fp_data_filter = FILTER_DATA_ALL;
	mon_pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
	mon_pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
	mon_pdev->mo_data_filter = FILTER_DATA_ALL;

	/*
	 * Initialize the ppdu tlv list
	 */
	TAILQ_INIT(&mon_pdev->ppdu_info_list);
	TAILQ_INIT(&mon_pdev->sched_comp_ppdu_list);
	mon_pdev->list_depth = 0;
	mon_pdev->tlv_count = 0;

	/* Initialize the cal client timer */
	dp_cal_client_attach(&mon_pdev->cal_client_ctx,
			     dp_pdev_to_cdp_pdev(pdev),
			     pdev->soc->osdev,
			     &dp_iterate_update_peer_list);
	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
		goto fail2;

	if (mon_ops->mon_lite_mon_alloc) {
		if (mon_ops->mon_lite_mon_alloc(pdev) != QDF_STATUS_SUCCESS) {
			dp_mon_err("%pK: lite mon alloc failed", pdev);
			goto fail3;
		}
	}

	if (mon_ops->mon_rings_init) {
		if (mon_ops->mon_rings_init(pdev)) {
			dp_mon_err("%pK: MONITOR rings setup failed", pdev);
			goto fail4;
		}
	}

	/* initialize sw monitor rx descriptors */
	if (mon_ops->rx_mon_desc_pool_init)
		mon_ops->rx_mon_desc_pool_init(pdev);

	/* allocate buffers and replenish the monitor RxDMA ring */
	if (mon_ops->rx_mon_buffers_alloc) {
		if (mon_ops->rx_mon_buffers_alloc(pdev)) {
			dp_mon_err("%pK: rx mon buffers alloc failed", pdev);
			goto fail5;
		}
	}

	/* attach monitor function */
	dp_monitor_tx_ppdu_stats_attach(pdev);

	/* mon pdev extended init */
	if (mon_ops->mon_pdev_ext_init)
		mon_ops->mon_pdev_ext_init(pdev);

	mon_pdev->is_dp_mon_pdev_initialized = true;

	return QDF_STATUS_SUCCESS;

fail5:
	if (mon_ops->rx_mon_desc_pool_deinit)
		mon_ops->rx_mon_desc_pool_deinit(pdev);

	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
fail4:
	if (mon_ops->mon_lite_mon_dealloc)
		mon_ops->mon_lite_mon_dealloc(pdev);
fail3:
	dp_htt_ppdu_stats_detach(pdev);
fail2:
	qdf_spinlock_destroy(&mon_pdev->neighbour_peer_mutex);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	if (mon_ops->tx_mon_filter_dealloc)
		mon_ops->tx_mon_filter_dealloc(pdev);
fail1:
	dp_mon_filter_dealloc(mon_pdev);
fail0:
	qdf_mem_free(mon_pdev->invalid_mon_peer);
	return QDF_STATUS_E_FAILURE;
}
QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(pdev->soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	if (!mon_pdev->is_dp_mon_pdev_initialized)
		return QDF_STATUS_SUCCESS;

	dp_mon_filters_reset(pdev);

	/* mon pdev extended deinit */
	if (mon_ops->mon_pdev_ext_deinit)
		mon_ops->mon_pdev_ext_deinit(pdev);

	/* detach monitor function */
	dp_monitor_tx_ppdu_stats_detach(pdev);

	if (mon_ops->rx_mon_buffers_free)
		mon_ops->rx_mon_buffers_free(pdev);
	if (mon_ops->rx_mon_desc_pool_deinit)
		mon_ops->rx_mon_desc_pool_deinit(pdev);
	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
	dp_cal_client_detach(&mon_pdev->cal_client_ctx);
	if (mon_ops->mon_lite_mon_dealloc)
		mon_ops->mon_lite_mon_dealloc(pdev);
	dp_htt_ppdu_stats_detach(pdev);
	qdf_spinlock_destroy(&mon_pdev->ppdu_stats_lock);
	dp_neighbour_peers_detach(pdev);
	dp_pktlogmod_exit(pdev);
	if (mon_ops->tx_mon_filter_dealloc)
		mon_ops->tx_mon_filter_dealloc(pdev);
	if (mon_pdev->filter)
		dp_mon_filter_dealloc(mon_pdev);
	if (mon_ops->mon_rings_deinit)
		mon_ops->mon_rings_deinit(pdev);
	if (mon_pdev->invalid_mon_peer)
		qdf_mem_free(mon_pdev->invalid_mon_peer);
	mon_pdev->is_dp_mon_pdev_initialized = false;

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_vdev_attach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev;
	struct dp_pdev *pdev = vdev->pdev;

	mon_vdev = (struct dp_mon_vdev *)qdf_mem_malloc(sizeof(*mon_vdev));
	if (!mon_vdev) {
		dp_mon_err("%pK: Monitor vdev allocation failed", vdev);
		return QDF_STATUS_E_NOMEM;
	}

	if (pdev && pdev->monitor_pdev &&
	    pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_attach(mon_vdev);

	vdev->monitor_vdev = mon_vdev;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_mon_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_mon_vdev *mon_vdev = vdev->monitor_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(pdev->soc);

	if (!mon_ops)
		return QDF_STATUS_E_FAILURE;

	if (!mon_vdev)
		return QDF_STATUS_E_FAILURE;

	if (pdev->monitor_pdev->scan_spcl_vap_configured)
		dp_scan_spcl_vap_stats_detach(mon_vdev);

	qdf_mem_free(mon_vdev);
	vdev->monitor_vdev = NULL;
	/* Set mvdev to NULL only if detach is called for the monitor/special vap */
	if (pdev->monitor_pdev->mvdev == vdev)
		pdev->monitor_pdev->mvdev = NULL;

	if (mon_ops->mon_lite_mon_vdev_delete)
		mon_ops->mon_lite_mon_vdev_delete(pdev, vdev);

	return QDF_STATUS_SUCCESS;
}
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_mon_peer_attach_notify() - Raise WDI event for peer create
 * @peer: DP Peer handle
 *
 * Return: none
 */
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_peer_cookie peer_cookie;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	peer_cookie.ctx = NULL;
	peer_cookie.pdev_id = pdev->pdev_id;
	peer_cookie.cookie = pdev->next_peer_cookie++;

	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, soc,
			     (void *)&peer_cookie,
			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);

	if (soc->peerstats_enabled) {
		if (!peer_cookie.ctx) {
			pdev->next_peer_cookie--;
			qdf_err("Failed to initialize peer rate stats");
			mon_peer->peerstats_ctx = NULL;
		} else {
			mon_peer->peerstats_ctx =
				(struct cdp_peer_rate_stats_ctx *)
				 peer_cookie.ctx;
		}
	}
}

/**
 * dp_mon_peer_detach_notify() - Raise WDI event for peer destroy
 * @peer: DP Peer handle
 *
 * Return: none
 */
static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_peer_cookie peer_cookie;

	pdev = peer->vdev->pdev;
	soc = pdev->soc;

	/* send peer destroy event to upper layer */
	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	peer_cookie.ctx = NULL;
	peer_cookie.ctx = (struct cdp_stats_cookie *)mon_peer->peerstats_ctx;

	dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
			     soc,
			     (void *)&peer_cookie,
			     peer->peer_id,
			     WDI_NO_VAL,
			     pdev->pdev_id);

	mon_peer->peerstats_ctx = NULL;
}
#else
static inline
void dp_mon_peer_attach_notify(struct dp_peer *peer)
{
	peer->monitor_peer->peerstats_ctx = NULL;
}

static inline
void dp_mon_peer_detach_notify(struct dp_peer *peer)
{
	peer->monitor_peer->peerstats_ctx = NULL;
}
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
QDF_STATUS dp_mon_peer_attach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer;
	struct dp_pdev *pdev;

	mon_peer = (struct dp_mon_peer *)qdf_mem_malloc(sizeof(*mon_peer));
	if (!mon_peer) {
		dp_mon_err("%pK: MONITOR peer allocation failed", peer);
		return QDF_STATUS_E_NOMEM;
	}

	peer->monitor_peer = mon_peer;
	pdev = peer->vdev->pdev;
	/*
	 * In tx_monitor mode, a filter may be set for a peer while it is
	 * still unassociated. When that peer becomes associated, the
	 * tx_cap_enabled flag needs to be updated to keep supporting the
	 * peer filter.
	 */
	dp_monitor_peer_tx_capture_filter_check(pdev, peer);

	DP_STATS_INIT(mon_peer);
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);

	dp_mon_peer_attach_notify(peer);

	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS dp_mon_peer_detach(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	if (!mon_peer)
		return QDF_STATUS_SUCCESS;

	dp_mon_peer_detach_notify(peer);

	qdf_mem_free(mon_peer);
	peer->monitor_peer = NULL;

	return QDF_STATUS_SUCCESS;
}
#ifndef DISABLE_MON_CONFIG
void dp_mon_register_intr_ops(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = NULL;

	mon_ops = dp_mon_ops_get(soc);
	if (!mon_ops) {
		dp_mon_err("Monitor ops is NULL");
		return;
	}
	if (mon_ops->mon_register_intr_ops)
		mon_ops->mon_register_intr_ops(soc);
}
#endif

struct cdp_peer_rate_stats_ctx *dp_mon_peer_get_peerstats_ctx(
						struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;

	if (mon_peer)
		return mon_peer->peerstats_ctx;
	else
		return NULL;
}
#ifdef QCA_ENHANCED_STATS_SUPPORT
void dp_mon_peer_reset_stats(struct dp_peer *peer)
{
	struct dp_mon_peer *mon_peer = NULL;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return;

	DP_STATS_CLR(mon_peer);
	DP_STATS_UPD(mon_peer, rx.avg_snr, CDP_INVALID_SNR);
}

void dp_mon_peer_get_stats(struct dp_peer *peer, void *arg,
			   enum cdp_stat_update_type type)
{
	struct dp_mon_peer *mon_peer = peer->monitor_peer;
	struct dp_mon_peer_stats *mon_peer_stats;

	if (!mon_peer || !arg)
		return;

	mon_peer_stats = &mon_peer->stats;

	switch (type) {
	case UPDATE_PEER_STATS:
	{
		struct cdp_peer_stats *peer_stats =
				(struct cdp_peer_stats *)arg;
		DP_UPDATE_MON_STATS(peer_stats, mon_peer_stats);
		break;
	}
	case UPDATE_VDEV_STATS:
	{
		struct cdp_vdev_stats *vdev_stats =
				(struct cdp_vdev_stats *)arg;
		DP_UPDATE_MON_STATS(vdev_stats, mon_peer_stats);
		break;
	}
	default:
		dp_mon_err("Invalid stats_update_type");
	}
}

void dp_mon_invalid_peer_update_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_mon_peer *mon_peer;
	struct dp_mon_peer_stats *mon_peer_stats;
	struct cdp_pdev_stats *pdev_stats;

	if (!pdev || !pdev->monitor_pdev)
		return;

	mon_peer = pdev->monitor_pdev->invalid_mon_peer;
	if (!mon_peer)
		return;

	mon_peer_stats = &mon_peer->stats;
	pdev_stats = &pdev->stats;
	DP_UPDATE_MON_STATS(pdev_stats, mon_peer_stats);
}

QDF_STATUS
dp_mon_peer_get_stats_param(struct dp_peer *peer, enum cdp_peer_stats_type type,
			    cdp_peer_stats_param_t *buf)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_mon_peer *mon_peer;

	mon_peer = peer->monitor_peer;
	if (!mon_peer)
		return QDF_STATUS_E_FAILURE;

	switch (type) {
	case cdp_peer_tx_rate:
		buf->tx_rate = mon_peer->stats.tx.tx_rate;
		break;
	case cdp_peer_tx_last_tx_rate:
		buf->last_tx_rate = mon_peer->stats.tx.last_tx_rate;
		break;
	case cdp_peer_tx_ratecode:
		buf->tx_ratecode = mon_peer->stats.tx.tx_ratecode;
		break;
	case cdp_peer_rx_rate:
		buf->rx_rate = mon_peer->stats.rx.rx_rate;
		break;
	case cdp_peer_rx_last_rx_rate:
		buf->last_rx_rate = mon_peer->stats.rx.last_rx_rate;
		break;
	case cdp_peer_rx_ratecode:
		buf->rx_ratecode = mon_peer->stats.rx.rx_ratecode;
		break;
	case cdp_peer_rx_avg_snr:
		buf->rx_avg_snr = mon_peer->stats.rx.avg_snr;
		break;
	case cdp_peer_rx_snr:
		buf->rx_snr = mon_peer->stats.rx.snr;
		break;
	default:
		dp_err("Invalid stats type requested");
		ret = QDF_STATUS_E_FAILURE;
	}

	return ret;
}
#endif
void dp_mon_ops_register(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	uint32_t target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_ops_register_1_0(mon_soc);
		break;
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA5332:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_ops_register_2_0(mon_soc);
#endif
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
}

#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
void dp_mon_ops_free(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	struct cdp_mon_ops *cdp_mon_ops = ops->mon_ops;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_ops *mon_ops = mon_soc->mon_ops;

	if (cdp_mon_ops)
		qdf_mem_free(cdp_mon_ops);

	if (mon_ops)
		qdf_mem_free(mon_ops);
}
#else
void dp_mon_ops_free(struct dp_soc *soc)
{
}
#endif
void dp_mon_cdp_ops_register(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;
	uint32_t target_type;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		dp_mon_cdp_ops_register_1_0(ops);
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
				dp_config_for_nac_rssi;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
				dp_vdev_get_neighbour_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
				dp_update_filter_neighbour_peers;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
		dp_cfr_filter_register_1_0(ops);
#endif
		break;
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA5332:
#ifdef QCA_MONITOR_2_0_SUPPORT
		dp_mon_cdp_ops_register_2_0(ops);
#ifdef ATH_SUPPORT_NAC_RSSI
		ops->ctrl_ops->txrx_vdev_config_for_nac_rssi =
				dp_lite_mon_config_nac_rssi_peer;
		ops->ctrl_ops->txrx_vdev_get_neighbour_rssi =
				dp_lite_mon_get_nac_peer_rssi;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
		ops->ctrl_ops->txrx_update_filter_neighbour_peers =
				dp_lite_mon_config_nac_peer;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
		dp_cfr_filter_register_2_0(ops);
#endif
#endif /* QCA_MONITOR_2_0_SUPPORT */
		break;
	default:
		dp_mon_err("%s: Unknown tgt type %d", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev =
				dp_get_mon_vdev_from_pdev_wifi3;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = dp_pkt_log_init;
	ops->misc_ops->pkt_log_con_service = dp_pkt_log_con_service;
	ops->misc_ops->pkt_log_exit = dp_pkt_log_exit;
#endif
	ops->ctrl_ops->enable_peer_based_pktlog =
				dp_enable_peer_based_pktlog;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params =
				dp_peer_update_pkt_capture_params;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef QCA_ENHANCED_STATS_SUPPORT
	ops->host_stats_ops->txrx_enable_enhanced_stats =
				dp_enable_enhanced_stats;
	ops->host_stats_ops->txrx_disable_enhanced_stats =
				dp_disable_enhanced_stats;
#endif /* QCA_ENHANCED_STATS_SUPPORT */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = dp_get_pldev;
#endif
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	ops->host_stats_ops->txrx_get_scan_spcl_vap_stats =
				dp_get_scan_spcl_vap_stats;
#endif
}
#ifdef QCA_MONITOR_OPS_PER_SOC_SUPPORT
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	if (ops->mon_ops) {
		qdf_mem_free(ops->mon_ops);
		ops->mon_ops = NULL;
	}
}
#else
static inline void
dp_mon_cdp_mon_ops_deregister(struct cdp_ops *ops)
{
	ops->mon_ops = NULL;
}
#endif

void dp_mon_cdp_ops_deregister(struct dp_soc *soc)
{
	struct cdp_ops *ops = soc->cdp_soc.ops;

	if (!ops) {
		dp_mon_err("cdp_ops is NULL");
		return;
	}

	dp_mon_cdp_mon_ops_deregister(ops);

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	ops->cfr_ops->txrx_cfr_filter = NULL;
#endif
	ops->cmn_drv_ops->txrx_set_monitor_mode = NULL;
	ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev = NULL;
#ifdef DP_PEER_EXTENDED_API
	ops->misc_ops->pkt_log_init = NULL;
	ops->misc_ops->pkt_log_con_service = NULL;
	ops->misc_ops->pkt_log_exit = NULL;
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	ops->ctrl_ops->txrx_vdev_config_for_nac_rssi = NULL;
	ops->ctrl_ops->txrx_vdev_get_neighbour_rssi = NULL;
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	ops->ctrl_ops->txrx_update_filter_neighbour_peers = NULL;
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	ops->ctrl_ops->enable_peer_based_pktlog = NULL;
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	ops->ctrl_ops->txrx_update_peer_pkt_capture_params = NULL;
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
#ifdef FEATURE_PERPKT_INFO
	ops->host_stats_ops->txrx_enable_enhanced_stats = NULL;
	ops->host_stats_ops->txrx_disable_enhanced_stats = NULL;
#endif /* FEATURE_PERPKT_INFO */
#ifdef WDI_EVENT_ENABLE
	ops->ctrl_ops->txrx_get_pldev = NULL;
#endif
}

#if defined(WDI_EVENT_ENABLE) &&\
	(defined(QCA_ENHANCED_STATS_SUPPORT) || !defined(REMOVE_PKT_LOG))
static inline
void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
{
	mon_soc->mon_ops->mon_ppdu_stats_ind_handler = NULL;
}
#else
static inline
void dp_mon_ppdu_stats_handler_deregister(struct dp_mon_soc *mon_soc)
{
}
#endif
#ifdef QCA_RSSI_DB2DBM
/**
 * dp_mon_compute_min_nf() - calculate the min nf value in the
 *			     active chains' 20MHz subbands
 * @conv_params: cdp_rssi_dbm_conv_param_dp structure value
 * @min_nf: location where the computed minimum NF is stored
 * @chain_idx: active chain index into the nfHwDbm array
 *
 * Computation: nfInDbm[][] is reduced to A_MIN(nfHwDbm[][]),
 * treating the row index as the active chain and the column index as
 * the 20MHz subband within that chain.
 * Example: chain_mask = 0x07 (three active chains, indices 0,1,2);
 * BandWidth = 40MHz (40MHz spans two 20MHz subbands, so columns 0,1
 * are considered when computing the min_nf value).
 *
 * Return: QDF_STATUS_SUCCESS if the value was set successfully,
 *	   QDF_STATUS_E_INVAL on error
 */
static QDF_STATUS
dp_mon_compute_min_nf(struct cdp_rssi_dbm_conv_param_dp *conv_params,
		      int8_t *min_nf, int chain_idx)
{
	int j;

	*min_nf = conv_params->nf_hw_dbm[chain_idx][0];

	switch (conv_params->curr_bw) {
	case CHAN_WIDTH_20:
	case CHAN_WIDTH_5:
	case CHAN_WIDTH_10:
		break;
	case CHAN_WIDTH_40:
		for (j = 1; j < SUB40BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_80:
		for (j = 1; j < SUB80BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_160:
	case CHAN_WIDTH_80P80:
	case CHAN_WIDTH_165:
		for (j = 1; j < SUB160BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	case CHAN_WIDTH_160P160:
	case CHAN_WIDTH_320:
		for (j = 1; j < SUB320BW; j++) {
			if (conv_params->nf_hw_dbm[chain_idx][j] < *min_nf)
				*min_nf = conv_params->nf_hw_dbm[chain_idx][j];
		}
		break;
	default:
		dp_cdp_err("Invalid bandwidth %u", conv_params->curr_bw);
		return QDF_STATUS_E_INVAL;
	}

	return QDF_STATUS_SUCCESS;
}
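
/*
 * Illustrative worked example (not part of the driver), using the
 * scenario from the comment above: chain_mask = 0x07, BW = 40MHz, and
 * hypothetical per-subband NF readings in nf_hw_dbm:
 *
 *	chain 0: { -92, -95 }  ->  min_nf = -95
 *	chain 1: { -90, -89 }  ->  min_nf = -90
 *	chain 2: { -94, -96 }  ->  min_nf = -96
 *
 * dp_mon_compute_min_nf() is called once per set bit of the chainmask,
 * scanning columns 0..SUB40BW-1 for that chain; note that the caller
 * below stores the final value (the last active chain's minimum) in
 * rx_status.min_nf_dbm.
 */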
/**
 * dp_mon_pdev_params_rssi_dbm_conv() - set the rssi-to-dbm conversion
 *					params into the monitor pdev
 * @cdp_soc: dp soc handle
 * @params: cdp_rssi_db2dbm_param_dp structure value
 *
 * Return: QDF_STATUS_SUCCESS if the values were set successfully,
 *	   QDF_STATUS_E_INVAL on error
 */
QDF_STATUS
dp_mon_pdev_params_rssi_dbm_conv(struct cdp_soc_t *cdp_soc,
				 struct cdp_rssi_db2dbm_param_dp *params)
{
	struct cdp_rssi_db2dbm_param_dp *dp_rssi_params = params;
	uint8_t pdev_id = params->pdev_id;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_mon_pdev *mon_pdev;
	struct cdp_rssi_temp_off_param_dp temp_off_param;
	struct cdp_rssi_dbm_conv_param_dp conv_params;
	int8_t min_nf = 0;
	int i;

	if (!soc->features.rssi_dbm_conv_support) {
		dp_cdp_err("rssi dbm conversion is not supported");
		return QDF_STATUS_E_INVAL;
	}

	if (!pdev || !pdev->monitor_pdev) {
		dp_cdp_err("Invalid pdev_id %u", pdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	mon_pdev = pdev->monitor_pdev;
	if (dp_rssi_params->rssi_temp_off_present) {
		temp_off_param = dp_rssi_params->temp_off_param;
		mon_pdev->ppdu_info.rx_status.rssi_temp_offset =
				temp_off_param.rssi_temp_offset;
	}
	if (dp_rssi_params->rssi_dbm_info_present) {
		conv_params = dp_rssi_params->rssi_dbm_param;
		for (i = 0; i < CDP_MAX_NUM_ANTENNA; i++) {
			if (!(conv_params.curr_rx_chainmask & (0x01 << i)))
				continue;

			if (QDF_STATUS_E_INVAL == dp_mon_compute_min_nf
					(&conv_params, &min_nf, i))
				return QDF_STATUS_E_INVAL;
		}
		mon_pdev->ppdu_info.rx_status.xlna_bypass_offset =
					conv_params.xlna_bypass_offset;
		mon_pdev->ppdu_info.rx_status.xlna_bypass_threshold =
					conv_params.xlna_bypass_threshold;
		mon_pdev->ppdu_info.rx_status.xbar_config =
					conv_params.xbar_config;
		mon_pdev->ppdu_info.rx_status.min_nf_dbm = min_nf;
		mon_pdev->ppdu_info.rx_status.rssi_dbm_conv_support =
					soc->features.rssi_dbm_conv_support;
	}

	return QDF_STATUS_SUCCESS;
}
#endif
void dp_mon_intr_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc = soc->monitor_soc;

	mon_soc->mon_rx_process = NULL;
	dp_mon_ppdu_stats_handler_deregister(mon_soc);
}

void dp_mon_feature_ops_deregister(struct dp_soc *soc)
{
	struct dp_mon_ops *mon_ops = dp_mon_ops_get(soc);

	if (!mon_ops) {
		dp_err("mon_ops is NULL");
		return;
	}

	mon_ops->mon_config_debug_sniffer = NULL;
	mon_ops->mon_peer_tx_init = NULL;
	mon_ops->mon_peer_tx_cleanup = NULL;
	mon_ops->mon_htt_ppdu_stats_attach = NULL;
	mon_ops->mon_htt_ppdu_stats_detach = NULL;
	mon_ops->mon_print_pdev_rx_mon_stats = NULL;
	mon_ops->mon_set_bsscolor = NULL;
	mon_ops->mon_pdev_get_filter_ucast_data = NULL;
	mon_ops->mon_pdev_get_filter_mcast_data = NULL;
	mon_ops->mon_pdev_get_filter_non_data = NULL;
	mon_ops->mon_neighbour_peer_add_ast = NULL;
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	mon_ops->mon_peer_tid_peer_id_update = NULL;
	mon_ops->mon_tx_ppdu_stats_attach = NULL;
	mon_ops->mon_tx_ppdu_stats_detach = NULL;
	mon_ops->mon_tx_capture_debugfs_init = NULL;
	mon_ops->mon_tx_add_to_comp_queue = NULL;
	mon_ops->mon_peer_tx_capture_filter_check = NULL;
	mon_ops->mon_print_pdev_tx_capture_stats = NULL;
	mon_ops->mon_config_enh_tx_capture = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_config_enh_rx_capture = NULL;
#endif
#ifdef QCA_SUPPORT_BPR
	mon_ops->mon_set_bpr_enable = NULL;
#endif
#ifdef ATH_SUPPORT_NAC
	mon_ops->mon_set_filter_neigh_peers = NULL;
#endif
#ifdef WLAN_ATF_ENABLE
	mon_ops->mon_set_atf_stats_enable = NULL;
#endif
#ifdef FEATURE_NAC_RSSI
	mon_ops->mon_filter_neighbour_peer = NULL;
#endif
#ifdef QCA_MCOPY_SUPPORT
	mon_ops->mon_filter_setup_mcopy_mode = NULL;
	mon_ops->mon_filter_reset_mcopy_mode = NULL;
	mon_ops->mon_mcopy_check_deliver = NULL;
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_filter_setup_enhanced_stats = NULL;
	mon_ops->mon_tx_enable_enhanced_stats = NULL;
	mon_ops->mon_tx_disable_enhanced_stats = NULL;
	mon_ops->mon_ppdu_desc_deliver = NULL;
	mon_ops->mon_ppdu_desc_notify = NULL;
	mon_ops->mon_ppdu_stats_feat_enable_check = NULL;
#ifdef WLAN_FEATURE_11BE
	mon_ops->mon_tx_stats_update = NULL;
#endif
#endif
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	mon_ops->mon_filter_setup_smart_monitor = NULL;
#endif
#ifdef WLAN_RX_PKT_CAPTURE_ENH
	mon_ops->mon_filter_setup_rx_enh_capture = NULL;
#endif
#ifdef WDI_EVENT_ENABLE
	mon_ops->mon_set_pktlog_wifi3 = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_full = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_lite = NULL;
	mon_ops->mon_filter_setup_rx_pkt_log_cbf = NULL;
	mon_ops->mon_filter_reset_rx_pkt_log_cbf = NULL;
#ifdef BE_PKTLOG_SUPPORT
	mon_ops->mon_filter_setup_pktlog_hybrid = NULL;
	mon_ops->mon_filter_reset_pktlog_hybrid = NULL;
#endif
#endif
#if defined(DP_CON_MON) && !defined(REMOVE_PKT_LOG)
	mon_ops->mon_pktlogmod_exit = NULL;
#endif
	mon_ops->rx_hdr_length_set = NULL;
	mon_ops->rx_packet_length_set = NULL;
	mon_ops->rx_wmask_subscribe = NULL;
	mon_ops->rx_enable_mpdu_logging = NULL;
	mon_ops->rx_enable_fpmo = NULL;
	mon_ops->mon_neighbour_peers_detach = NULL;
	mon_ops->mon_vdev_set_monitor_mode_buf_rings = NULL;
	mon_ops->mon_vdev_set_monitor_mode_rings = NULL;
#ifdef QCA_ENHANCED_STATS_SUPPORT
	mon_ops->mon_rx_stats_update = NULL;
	mon_ops->mon_rx_populate_ppdu_usr_info = NULL;
	mon_ops->mon_rx_populate_ppdu_info = NULL;
#endif
}
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
	if (!mon_soc) {
		dp_mon_err("%pK: mem allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	/* register monitor ops */
	soc->monitor_soc = mon_soc;
	dp_mon_ops_register(soc);
	dp_mon_register_intr_ops(soc);
	dp_mon_cdp_ops_register(soc);
	dp_mon_register_feature_ops(soc);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
	struct dp_mon_soc *mon_soc;

	if (!soc) {
		dp_mon_err("dp_soc is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	mon_soc = soc->monitor_soc;
	dp_monitor_vdev_timer_deinit(soc);
	dp_mon_cdp_ops_deregister(soc);
	soc->monitor_soc = NULL;
	qdf_mem_free(mon_soc);

	return QDF_STATUS_SUCCESS;
}